
[Xen-devel] Re: [PATCH 06/18] Nested Virtualization: p2m phystable



At 13:29 +0100 on 15 Apr (1271338157), Christoph Egger wrote:
> 
> Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>

Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>


Content-Description: xen_nh06_p2m_phystable.diff
> # HG changeset patch
> # User cegger
> # Date 1271330296 -7200
> Move phys_table from struct arch_domain to struct p2m_domain.
> This prepares p2m code to deal with multiple p2m tables per-domain.
> Multiple p2m tables are needed to use hap with nested virtualization.
> 
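The conversion below is mechanical: every reader of the old field goes
through the new accessor pair instead.  A minimal before/after sketch of
the pattern, using the p2m_get_hostp2m()/p2m_get_pagetable() macros that
the p2m.h hunk at the end of this patch introduces:

    /* Before: the p2m root was a field of struct arch_domain. */
    paddr_t root = pagetable_get_paddr(d->arch.phys_table);

    /* After: fetch the host p2m first, then its pagetable.  These two
     * lines replace the one above; p2m_get_hostp2m(d) is just
     * (d)->arch.p2m. */
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    paddr_t root = pagetable_get_paddr(p2m_get_pagetable(p2m));
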
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/hvm/svm/svm.c
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -212,6 +212,7 @@ static int svm_vmcb_restore(struct vcpu 
>      unsigned long mfn = 0;
>      p2m_type_t p2mt;
>      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
>  
>      if ( c->pending_valid &&
>           ((c->pending_type == 1) || (c->pending_type > 6) ||
> @@ -257,7 +258,7 @@ static int svm_vmcb_restore(struct vcpu 
>      {
>          vmcb->np_enable = 1;
>          vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
> -        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
> +        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m));
>      }
>  
>      if ( c->pending_valid ) 
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/hvm/svm/vmcb.c
> --- a/xen/arch/x86/hvm/svm/vmcb.c
> +++ b/xen/arch/x86/hvm/svm/vmcb.c
> @@ -26,7 +26,7 @@
>  #include <asm/cpufeature.h>
>  #include <asm/processor.h>
>  #include <asm/msr.h>
> -#include <asm/paging.h>
> +#include <asm/p2m.h>
>  #include <asm/hvm/hvm.h>
>  #include <asm/hvm/io.h>
>  #include <asm/hvm/support.h>
> @@ -232,7 +232,7 @@ static int construct_vmcb(struct vcpu *v
>      {
>          vmcb->np_enable = 1; /* enable nested paging */
>          vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
> -        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
> +        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
>  
>          /* No point in intercepting CR3 reads/writes. */
>          vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/hvm/vmx/vmx.c
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -81,7 +81,7 @@ static int vmx_domain_initialise(struct 
>      d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
>      d->arch.hvm_domain.vmx.ept_control.gaw  = EPT_DEFAULT_GAW;
>      d->arch.hvm_domain.vmx.ept_control.asr  =
> -        pagetable_get_pfn(d->arch.phys_table);
> +        pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
>  
>  
>      if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/mm/hap/hap.c
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -410,7 +410,7 @@ static void hap_install_xen_entries_in_l
>  
>      /* Install the domain-specific P2M table */
>      l4e[l4_table_offset(RO_MPT_VIRT_START)] =
> -        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
> +        l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
>                       __PAGE_HYPERVISOR);
>  
>      hap_unmap_domain_page(l4e);
> @@ -421,6 +421,7 @@ static void hap_install_xen_entries_in_l
>  static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
>  {
>      struct domain *d = v->domain;
> +    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
>      l2_pgentry_t *l2e;
>      l3_pgentry_t *p2m;
>      int i;
> @@ -446,8 +447,8 @@ static void hap_install_xen_entries_in_l
>              l2e_empty();
>  
>      /* Install the domain-specific p2m table */
> -    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
> -    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
> +    ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
> +    p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
>      for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
>      {
>          l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/mm/hap/p2m-ept.c
> --- a/xen/arch/x86/mm/hap/p2m-ept.c
> +++ b/xen/arch/x86/mm/hap/p2m-ept.c
> @@ -240,12 +240,13 @@ ept_set_entry(struct domain *d, unsigned
>      int direct_mmio = (p2mt == p2m_mmio_direct);
>      uint8_t ipat = 0;
>      int need_modify_vtd_table = 1;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>  
>      if (  order != 0 )
>          if ( (gfn & ((1UL << order) - 1)) )
>              return 1;
>  
> -    table = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
>  
>      ASSERT(table != NULL);
>  
> @@ -368,7 +369,7 @@ static mfn_t ept_get_entry(struct domain
>                             p2m_query_t q)
>  {
>      ept_entry_t *table =
> -        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>      unsigned long gfn_remainder = gfn;
>      ept_entry_t *ept_entry;
>      u32 index;
> @@ -462,7 +463,7 @@ out:
>  static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level)
>  {
>      ept_entry_t *table =
> -        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>      unsigned long gfn_remainder = gfn;
>      ept_entry_t *ept_entry;
>      ept_entry_t content = { .epte = 0 };
> @@ -497,7 +498,7 @@ static ept_entry_t ept_get_entry_content
>  void ept_walk_table(struct domain *d, unsigned long gfn)
>  {
>      ept_entry_t *table =
> -        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>      unsigned long gfn_remainder = gfn;
>  
>      int i;
> @@ -631,12 +632,12 @@ static void ept_change_entry_type_global
>      int i2;
>      int i1;
>  
> -    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
> +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 )
>          return;
>  
>      BUG_ON(EPT_DEFAULT_GAW != 3);
>  
> -    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>      for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
>      {
>          if ( !l4e[i4].epte )
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1253,7 +1253,7 @@ p2m_set_entry(struct domain *d, unsigned
>                unsigned int page_order, p2m_type_t p2mt)
>  {
>      // XXX -- this might be able to be faster iff current->domain == d
> -    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
> +    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
>      void *table =map_domain_page(mfn_x(table_mfn));
>      unsigned long i, gfn_remainder = gfn;
>      l1_pgentry_t *p2m_entry;
> @@ -1408,7 +1408,7 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
>       * XXX we will return p2m_invalid for unmapped gfns */
>      *t = p2m_mmio_dm;
>  
> -    mfn = pagetable_get_mfn(d->arch.phys_table);
> +    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
>  
>      if ( gfn > d->arch.p2m->max_mapped_pfn )
>          /* This pfn is higher than the highest the p2m map currently holds */
> @@ -1622,11 +1622,11 @@ int p2m_alloc_table(struct domain *d,
>      struct page_info *page, *p2m_top;
>      unsigned int page_count = 0;
>      unsigned long gfn = -1UL;
> -    struct p2m_domain *p2m = d->arch.p2m;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>  
>      p2m_lock(p2m);
>  
> -    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
> +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
>      {
>          P2M_ERROR("p2m already allocated for this domain\n");
>          p2m_unlock(p2m);
> @@ -1655,7 +1655,7 @@ int p2m_alloc_table(struct domain *d,
>  #endif
>          | 1 | PGT_validated;
>  
> -    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
> +    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
>  
>      P2M_PRINTK("populating p2m table\n");
>  
> @@ -1699,7 +1699,7 @@ void p2m_teardown(struct domain *d)
>   * We know we don't have any extra mappings to these pages */
>  {
>      struct page_info *pg;
> -    struct p2m_domain *p2m = d->arch.p2m;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      unsigned long gfn;
>      p2m_type_t t;
>      mfn_t mfn;
> @@ -1711,7 +1711,7 @@ void p2m_teardown(struct domain *d)
>          if(mfn_valid(mfn) && (t == p2m_ram_shared))
>              BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
>      }
> -    d->arch.phys_table = pagetable_null();
> +    p2m->phys_table = pagetable_null();
>  
>      while ( (pg = page_list_remove_head(&p2m->pages)) )
>          p2m->free_page(d, pg);
> @@ -1822,7 +1822,7 @@ static void audit_p2m(struct domain *d)
>      spin_unlock(&d->page_alloc_lock);
>  
>      /* Audit part two: walk the domain's p2m table, checking the entries. */
> -    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
> +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0 )
>      {
>          l2_pgentry_t *l2e;
>          l1_pgentry_t *l1e;
> @@ -1832,11 +1832,11 @@ static void audit_p2m(struct domain *d)
>          l4_pgentry_t *l4e;
>          l3_pgentry_t *l3e;
>          int i3, i4;
> -        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>  #else /* CONFIG_PAGING_LEVELS == 3 */
>          l3_pgentry_t *l3e;
>          int i3;
> -        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
>  #endif
>  
>          gfn = 0;
> @@ -2248,22 +2248,23 @@ void p2m_change_type_global(struct domai
>      l4_pgentry_t *l4e;
>      unsigned long i4;
>  #endif /* CONFIG_PAGING_LEVELS == 4 */
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
>  
>      BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
>  
>      if ( !paging_mode_translate(d) )
>          return;
>  
> -    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
> +    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
>          return;
>  
>      ASSERT(p2m_locked_by_me(d->arch.p2m));
>  
>  #if CONFIG_PAGING_LEVELS == 4
> -    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
>  #else /* CONFIG_PAGING_LEVELS == 3 */
> -    l3mfn = _mfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> -    l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
> +    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
> +    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
>  #endif
>  
>  #if CONFIG_PAGING_LEVELS >= 4
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/mm/shadow/common.c
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3177,7 +3177,7 @@ int shadow_enable(struct domain *d, u32 
>   out_locked:
>      shadow_unlock(d);
>   out_unlocked:
> -    if ( rv != 0 && !pagetable_is_null(d->arch.phys_table) )
> +    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) )
>          p2m_teardown(d);
>      if ( rv != 0 && pg != NULL )
>          shadow_free_p2m_page(d, pg);
> diff -r 96efed7dedaf -r 96cbc9586732 xen/arch/x86/mm/shadow/multi.c
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -1473,7 +1473,7 @@ void sh_install_xen_entries_in_l4(struct
>      {
>          /* install domain-specific P2M table */
>          sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
> -            shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
> +            shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
>                                  __PAGE_HYPERVISOR);
>      }
>  
> @@ -1532,8 +1532,8 @@ static void sh_install_xen_entries_in_l2
>      {
>          /* Install the domain-specific p2m table */
>          l3_pgentry_t *p2m;
> -        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
> -        p2m = sh_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
> +        ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
> +        p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
>          for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
>          {
>              sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
> diff -r 96efed7dedaf -r 96cbc9586732 xen/include/asm-x86/domain.h
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -272,9 +272,6 @@ struct arch_domain
>      struct paging_domain paging;
>      struct p2m_domain *p2m;
>  
> -    /* Shadow translated domain: P2M mapping */
> -    pagetable_t phys_table;
> -
>      /* NB. protected by d->event_lock and by irq_desc[irq].lock */
>      int *irq_pirq;
>      int *pirq_irq;
> diff -r 96efed7dedaf -r 96cbc9586732 xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -29,6 +29,7 @@
>  #include <xen/config.h>
>  #include <xen/paging.h>
>  #include <asm/mem_sharing.h>
> +#include <asm/page.h>    /* for pagetable_t */
>  
>  /*
>   * The phys_to_machine_mapping maps guest physical frame numbers 
> @@ -166,6 +167,9 @@ struct p2m_domain {
>      int                locker;   /* processor which holds the lock */
>      const char        *locker_function; /* Func that took it */
>  
> +    /* Shadow translated domain: p2m mapping */
> +    pagetable_t        phys_table;
> +
>      /* Pages used to construct the p2m */
>      struct page_list_head pages;
>  
> @@ -212,6 +216,11 @@ struct p2m_domain {
>      } pod;
>  };
>  
> +/* get host p2m table */
> +#define p2m_get_hostp2m(d)      ((d)->arch.p2m)
> +
> +#define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
> +
>  /*
>   * The P2M lock.  This protects all updates to the p2m table.
>   * Updates are expected to be safe against concurrent reads,
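
With phys_table in struct p2m_domain, the p2m root is no longer tied to
the host p2m: a nested p2m can later carry its own phys_table and be
walked through the same accessor.  A hypothetical helper, not part of
this patch, to illustrate where this is heading:

    /* Hypothetical: works for any p2m, host or nested, once each
     * struct p2m_domain carries its own phys_table. */
    static inline mfn_t p2m_root_mfn(struct p2m_domain *p2m)
    {
        return pagetable_get_mfn(p2m_get_pagetable(p2m));
    }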


-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
