
Re: [Xen-devel] [PATCH v5 10/15] x86/altp2m: add remaining support routines.



>From: dunlapg@xxxxxxxxx [mailto:dunlapg@xxxxxxxxx] On Behalf Of George Dunlap
>Sent: Thursday, July 16, 2015 7:45 AM
>
>On Tue, Jul 14, 2015 at 1:14 AM, Ed White <edmund.h.white@xxxxxxxxx>
>wrote:
>> Add the remaining routines required to support enabling the alternate
>> p2m functionality.
>>
>> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
>>
>> Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>> ---
>>  xen/arch/x86/hvm/hvm.c           |  58 +++++-
>>  xen/arch/x86/mm/hap/Makefile     |   1 +
>>  xen/arch/x86/mm/hap/altp2m_hap.c |  98 ++++++++++
>>  xen/arch/x86/mm/p2m-ept.c        |   3 +
>>  xen/arch/x86/mm/p2m.c            | 385 +++++++++++++++++++++++++++++++++++++++
>>  xen/include/asm-x86/hvm/altp2m.h |   4 +
>>  xen/include/asm-x86/p2m.h        |  33 ++++
>>  7 files changed, 576 insertions(+), 6 deletions(-)
>>  create mode 100644 xen/arch/x86/mm/hap/altp2m_hap.c
>>
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index 38deedc..a9f4b1b 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -2802,10 +2802,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      mfn_t mfn;
>>      struct vcpu *curr = current;
>>      struct domain *currd = curr->domain;
>> -    struct p2m_domain *p2m;
>> +    struct p2m_domain *p2m, *hostp2m;
>>      int rc, fall_through = 0, paged = 0;
>>      int sharing_enomem = 0;
>>      vm_event_request_t *req_ptr = NULL;
>> +    bool_t ap2m_active = 0;
>>
>>      /* On Nested Virtualization, walk the guest page table.
>>       * If this succeeds, all is fine.
>> @@ -2865,11 +2866,31 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>          goto out;
>>      }
>>
>> -    p2m = p2m_get_hostp2m(currd);
>> -    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
>> +    ap2m_active = altp2m_active(currd);
>> +
>> +    /* Take a lock on the host p2m speculatively, to avoid potential
>> +     * locking order problems later and to handle unshare etc.
>> +     */
>> +    hostp2m = p2m_get_hostp2m(currd);
>> +    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
>>                                P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
>>                                NULL);
>>
>> +    if ( ap2m_active )
>> +    {
>> +        if ( altp2m_hap_nested_page_fault(curr, gpa, gla, npfec, &p2m) == 1 )
>> +        {
>> +            /* entry was lazily copied from host -- retry */
>> +            __put_gfn(hostp2m, gfn);
>> +            rc = 1;
>> +            goto out;
>> +        }
>> +
>> +        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>> +    }
>> +    else
>> +        p2m = hostp2m;
>> +
>>      /* Check access permissions first, then handle faults */
>>      if ( mfn_x(mfn) != INVALID_MFN )
>>      {
>> @@ -2909,6 +2930,20 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>
>>          if ( violation )
>>          {
>> +            /* Should #VE be emulated for this fault? */
>> +            if ( p2m_is_altp2m(p2m) && !cpu_has_vmx_virt_exceptions )
>> +            {
>> +                bool_t sve;
>> +
>> +                p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
>> +
>> +                if ( !sve && altp2m_vcpu_emulate_ve(curr) )
>> +                {
>> +                    rc = 1;
>> +                    goto out_put_gfn;
>> +                }
>> +            }
>> +
>>              if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
>>              {
>>                  fall_through = 1;
>> @@ -2928,7 +2963,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>           (npfec.write_access &&
>>            (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
>>      {
>> -        put_gfn(currd, gfn);
>> +        __put_gfn(p2m, gfn);
>> +        if ( ap2m_active )
>> +            __put_gfn(hostp2m, gfn);
>>
>>          rc = 0;
>>          if ( unlikely(is_pvh_domain(currd)) )
>> @@ -2957,6 +2994,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      /* Spurious fault? PoD and log-dirty also take this path. */
>>      if ( p2m_is_ram(p2mt) )
>>      {
>> +        rc = 1;
>>          /*
>>           * Page log dirty is always done with order 0. If this mfn resides in
>>           * a large page, we do not change other pages type within that large
>> @@ -2965,9 +3003,15 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>          if ( npfec.write_access )
>>          {
>>              paging_mark_dirty(currd, mfn_x(mfn));
>> +            /* If p2m is really an altp2m, unlock here to avoid lock ordering
>> +             * violation when the change below is propagated from host p2m */
>> +            if ( ap2m_active )
>> +                __put_gfn(p2m, gfn);
>>              p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);
>> +            __put_gfn(ap2m_active ? hostp2m : p2m, gfn);
>> +
>> +            goto out;
>>          }
>> -        rc = 1;
>>          goto out_put_gfn;
>>      }
>>
>> @@ -2977,7 +3021,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>>      rc = fall_through;
>>
>>  out_put_gfn:
>> -    put_gfn(currd, gfn);
>> +    __put_gfn(p2m, gfn);
>> +    if ( ap2m_active )
>> +        __put_gfn(hostp2m, gfn);
>>  out:
>>      /* All of these are delayed until we exit, since we might
>>       * sleep on event ring wait queues, and we must not hold
>> diff --git a/xen/arch/x86/mm/hap/Makefile b/xen/arch/x86/mm/hap/Makefile
>> index 68f2bb5..216cd90 100644
>> --- a/xen/arch/x86/mm/hap/Makefile
>> +++ b/xen/arch/x86/mm/hap/Makefile
>> @@ -4,6 +4,7 @@ obj-y += guest_walk_3level.o
>>  obj-$(x86_64) += guest_walk_4level.o
>>  obj-y += nested_hap.o
>>  obj-y += nested_ept.o
>> +obj-y += altp2m_hap.o
>>
>>  guest_walk_%level.o: guest_walk.c Makefile
>>         $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
>> diff --git a/xen/arch/x86/mm/hap/altp2m_hap.c b/xen/arch/x86/mm/hap/altp2m_hap.c
>> new file mode 100644
>> index 0000000..52c7877
>> --- /dev/null
>> +++ b/xen/arch/x86/mm/hap/altp2m_hap.c
>> @@ -0,0 +1,98 @@
>> +/******************************************************************************
>> + * arch/x86/mm/hap/altp2m_hap.c
>> + *
>> + * Copyright (c) 2014 Intel Corporation.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program; if not, write to the Free Software
>> + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
>> + */
>> +
>> +#include <asm/domain.h>
>> +#include <asm/page.h>
>> +#include <asm/paging.h>
>> +#include <asm/p2m.h>
>> +#include <asm/hap.h>
>> +#include <asm/hvm/altp2m.h>
>> +
>> +#include "private.h"
>> +
>> +/*
>> + * If the fault is for a not present entry:
>> + *     if the entry in the host p2m has a valid mfn, copy it and retry
>> + *     else indicate that outer handler should handle fault
>> + *
>> + * If the fault is for a present entry:
>> + *     indicate that outer handler should handle fault
>> + */
>> +
>> +int
>> +altp2m_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
>> +                                unsigned long gla, struct npfec npfec,
>> +                                struct p2m_domain **ap2m)
>> +{
>> +    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
>> +    p2m_type_t p2mt;
>> +    p2m_access_t p2ma;
>> +    unsigned int page_order;
>> +    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
>> +    unsigned long mask;
>> +    mfn_t mfn;
>> +    int rv;
>> +
>> +    *ap2m = p2m_get_altp2m(v);
>> +
>> +    mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
>> +                              0, &page_order);
>> +    __put_gfn(*ap2m, gfn_x(gfn));
>> +
>> +    if ( mfn_x(mfn) != INVALID_MFN )
>> +        return 0;
>> +
>> +    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
>> +                              P2M_ALLOC | P2M_UNSHARE, &page_order);
>> +    put_gfn(hp2m->domain, gfn_x(gfn));
>
>So the reason I said my comments weren't blockers for v3 was so that it could
>be checked in before the code freeze last Friday if that turned out to be
>possible.
>
>Please do at least give this function a name that reflects what it does
>(i.e., try to propagate changes from the host p2m to the altp2m), and
>change this put_gfn() to match the __put_gfn() above.
>
>I'd prefer it if you moved this into mm/p2m.c as well.
>
> -George

Will do.
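For the rename and the put_gfn() change, something along these lines, perhaps
(untested sketch against this patch; the new name below is only a placeholder,
and the function would move into mm/p2m.c at the same time):

 int
-altp2m_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
-                                unsigned long gla, struct npfec npfec,
-                                struct p2m_domain **ap2m)
+p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
+                     unsigned long gla, struct npfec npfec,
+                     struct p2m_domain **ap2m)
 {
 [...]
     mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
                               P2M_ALLOC | P2M_UNSHARE, &page_order);
-    put_gfn(hp2m->domain, gfn_x(gfn));
+    __put_gfn(hp2m, gfn_x(gfn));

The caller in hvm_hap_nested_page_fault() would be updated to match, and using
__put_gfn(hp2m, ...) keeps it symmetric with the __put_gfn(*ap2m, ...) call
earlier in the function.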

Ravi

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

