
[PATCH] x86/vmx: Fold VMCS logic in vmx_{get,set}_segment_register()


  • To: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • Date: Fri, 21 Jan 2022 11:08:34 +0000
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Jan Beulich <JBeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Jun Nakajima <jun.nakajima@xxxxxxxxx>, Kevin Tian <kevin.tian@xxxxxxxxx>
  • Delivery-date: Fri, 21 Jan 2022 11:09:18 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Xen's segment enumeration almost matches the VMCS encoding order, and in the
VMCS encoding order the system segments immediately follow the user segments
for all relevant attributes (selector, limit, base and access rights).
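
For reference, the two orderings line up as follows (my reading of the
x86_seg_* values pinned down by the BUILD_BUG_ON()s below, together with the
VMCS field encodings in the SDM):

  index  x86_seg_*   VMCS encoding order
   0-5   es .. gs    es .. gs
    6    tr          ldtr
    7    ldtr        tr
    8    gdtr        gdtr  (limit/base only)
    9    idtr        idtr  (limit/base only)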

Use a sneaky xor to hide the one difference in encoding order, allowing the
switch statements to be folded and dropping 10 __vmread() and 10 __vmwrite()
calls.  Bloat-o-meter reports:

  add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-433 (-433)
  Function                                     old     new   delta
  vmx_set_segment_register                     804     593    -211
  vmx_get_segment_register                     778     556    -222

showing that these wrappers aren't trivial.  In addition, 20 BUGs' worth of
metadata is dropped.
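
The fold relies on GUEST_SEG_SELECTOR() and friends being index-based helpers
over the contiguous VMCS field encodings.  Roughly speaking (a sketch derived
from the SDM encodings; the real definitions live in vmcs.h and may differ in
detail), they amount to:

  #define GUEST_SEG_SELECTOR(seg) (GUEST_ES_SELECTOR + (seg) * 2)
  #define GUEST_SEG_LIMIT(seg)    (GUEST_ES_LIMIT    + (seg) * 2)
  #define GUEST_SEG_BASE(seg)     (GUEST_ES_BASE     + (seg) * 2)
  #define GUEST_SEG_AR_BYTES(seg) (GUEST_ES_AR_BYTES + (seg) * 2)

which is why a single seg ^= 1 for tr/ldtr is enough to reuse one read/write
path for every segment.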

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>

We could make x86_seg_* match the VMCS encoding order if we're willing to make
v->arch.hvm.vmx.vm86_saved_seg[] one entry longer, but I don't think the added
complexity to the vmx_realmode logic is worth it.
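
For the record, that alternative would look something like this (purely a
hypothetical sketch, not proposed; if I'm reading the tree right,
v->arch.hvm.vmx.vm86_saved_seg[] is bounded by x86_seg_tr, so the swap would
grow it from 6 to 7 entries):

  /* Hypothetical re-enumeration to match the VMCS encoding order. */
  enum x86_segment {
      x86_seg_es, x86_seg_cs, x86_seg_ss, x86_seg_ds, x86_seg_fs, x86_seg_gs,
      x86_seg_ldtr,  /* 6, currently 7 */
      x86_seg_tr,    /* 7, currently 6 */
      x86_seg_gdtr,
      x86_seg_idtr,
      /* ... */
  };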
---
 xen/arch/x86/hvm/vmx/vmx.c | 77 ++++++++++++++++++++--------------------------
 1 file changed, 34 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c44cf8f5d425..9765cfd90a0a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -986,6 +986,7 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
     unsigned long attr = 0, sel = 0, limit;
+    unsigned int tmp_seg;
 
     /*
      * We may get here in the context of dump_execstate(), which may have
@@ -1009,34 +1010,34 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
         return;
     }
 
-    switch ( seg )
+    /*
+     * Xen's x86_seg_* enumeration *almost* matches the VMCS encoding order.
+     *
+     * tr and ldtr are reversed, and other areas of code rely on this, so we
+     * can't just re-enumerate.
+     */
+    BUILD_BUG_ON(x86_seg_tr   != 6);
+    BUILD_BUG_ON(x86_seg_ldtr != 7);
+    BUILD_BUG_ON(x86_seg_gdtr != 8);
+    BUILD_BUG_ON(x86_seg_idtr != 9);
+    switch ( tmp_seg = seg )
     {
-    case x86_seg_es ... x86_seg_gs:
-        __vmread(GUEST_SEG_SELECTOR(seg), &sel);
-        __vmread(GUEST_SEG_LIMIT(seg),    &limit);
-        __vmread(GUEST_SEG_BASE(seg),     &reg->base);
-        __vmread(GUEST_SEG_AR_BYTES(seg), &attr);
-        break;
     case x86_seg_tr:
-        __vmread(GUEST_TR_SELECTOR, &sel);
-        __vmread(GUEST_TR_LIMIT,    &limit);
-        __vmread(GUEST_TR_BASE,     &reg->base);
-        __vmread(GUEST_TR_AR_BYTES, &attr);
-        break;
+    case x86_seg_ldtr:
+        tmp_seg ^= 1; /* Flip tr and ldtr so GUEST_SEG_*() works. */
+        fallthrough;
+
+    case x86_seg_es ... x86_seg_gs:
+        __vmread(GUEST_SEG_SELECTOR(tmp_seg), &sel);
+        __vmread(GUEST_SEG_AR_BYTES(tmp_seg), &attr);
+        fallthrough;
+
     case x86_seg_gdtr:
-        __vmread(GUEST_GDTR_LIMIT, &limit);
-        __vmread(GUEST_GDTR_BASE,  &reg->base);
-        break;
     case x86_seg_idtr:
-        __vmread(GUEST_IDTR_LIMIT, &limit);
-        __vmread(GUEST_IDTR_BASE,  &reg->base);
-        break;
-    case x86_seg_ldtr:
-        __vmread(GUEST_LDTR_SELECTOR, &sel);
-        __vmread(GUEST_LDTR_LIMIT,    &limit);
-        __vmread(GUEST_LDTR_BASE,     &reg->base);
-        __vmread(GUEST_LDTR_AR_BYTES, &attr);
+        __vmread(GUEST_SEG_LIMIT(tmp_seg),    &limit);
+        __vmread(GUEST_SEG_BASE(tmp_seg),     &reg->base);
         break;
+
     default:
         BUG();
         return;
@@ -1150,32 +1151,22 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
 
     switch ( seg )
     {
+    case x86_seg_tr:
+    case x86_seg_ldtr:
+        seg ^= 1; /* Flip tr and ldtr so GUEST_SEG_*() works. */
+        fallthrough;
+
     case x86_seg_es ... x86_seg_gs:
         __vmwrite(GUEST_SEG_SELECTOR(seg), sel);
-        __vmwrite(GUEST_SEG_LIMIT(seg),    limit);
-        __vmwrite(GUEST_SEG_BASE(seg),     base);
         __vmwrite(GUEST_SEG_AR_BYTES(seg), attr);
-        break;
-    case x86_seg_tr:
-        __vmwrite(GUEST_TR_SELECTOR, sel);
-        __vmwrite(GUEST_TR_LIMIT, limit);
-        __vmwrite(GUEST_TR_BASE, base);
-        __vmwrite(GUEST_TR_AR_BYTES, attr);
-        break;
+        fallthrough;
+
     case x86_seg_gdtr:
-        __vmwrite(GUEST_GDTR_LIMIT, limit);
-        __vmwrite(GUEST_GDTR_BASE, base);
-        break;
     case x86_seg_idtr:
-        __vmwrite(GUEST_IDTR_LIMIT, limit);
-        __vmwrite(GUEST_IDTR_BASE, base);
-        break;
-    case x86_seg_ldtr:
-        __vmwrite(GUEST_LDTR_SELECTOR, sel);
-        __vmwrite(GUEST_LDTR_LIMIT, limit);
-        __vmwrite(GUEST_LDTR_BASE, base);
-        __vmwrite(GUEST_LDTR_AR_BYTES, attr);
+        __vmwrite(GUEST_SEG_LIMIT(seg),    limit);
+        __vmwrite(GUEST_SEG_BASE(seg),     base);
         break;
+
     default:
         BUG();
     }
-- 
2.11.0




 

