[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] Re: [PATCH 12/13] Nested Virtualization: vram



Hi, 

Is this needed as part of the nested-HVM series or just an independent
interface change?

Tim.

Content-Description: xen_nh12_vram.diff
> # HG changeset patch
> # User cegger
> # Date 1283345891 -7200
> Move dirty_vram from struct hvm_domain to struct p2m_domain
> 
> diff -r c1a95c7ef858 -r 50b3e6c73d7c xen/arch/x86/mm/hap/hap.c
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -58,7 +58,8 @@
>  static int hap_enable_vram_tracking(struct domain *d)
>  {
>      int i;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>  
>      if ( !dirty_vram )
>          return -EINVAL;
> @@ -70,7 +71,7 @@ static int hap_enable_vram_tracking(stru
>  
>      /* set l1e entries of P2M table to be read-only. */
>      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
> -        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
> +        p2m_change_type(p2m, i, p2m_ram_rw, p2m_ram_logdirty);
>  
>      flush_tlb_mask(&d->domain_dirty_cpumask);
>      return 0;
> @@ -79,7 +80,8 @@ static int hap_enable_vram_tracking(stru
>  static int hap_disable_vram_tracking(struct domain *d)
>  {
>      int i;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>  
>      if ( !dirty_vram )
>          return -EINVAL;
> @@ -90,7 +92,7 @@ static int hap_disable_vram_tracking(str
>  
>      /* set l1e entries of P2M table with normal mode */
>      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
> -        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);
> +        p2m_change_type(p2m, i, p2m_ram_logdirty, p2m_ram_rw);
>  
>      flush_tlb_mask(&d->domain_dirty_cpumask);
>      return 0;
> @@ -99,14 +101,15 @@ static int hap_disable_vram_tracking(str
>  static void hap_clean_vram_tracking(struct domain *d)
>  {
>      int i;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>  
>      if ( !dirty_vram )
>          return;
>  
>      /* set l1e entries of P2M table to be read-only. */
>      for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
> -        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
> +        p2m_change_type(p2m, i, p2m_ram_rw, p2m_ram_logdirty);
>  
>      flush_tlb_mask(&d->domain_dirty_cpumask);
>  }
> @@ -124,7 +127,8 @@ int hap_track_dirty_vram(struct domain *
>                           XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
>  {
>      long rc = 0;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>  
>      if ( nr )
>      {
> @@ -149,7 +153,7 @@ int hap_track_dirty_vram(struct domain *
>  
>              dirty_vram->begin_pfn = begin_pfn;
>              dirty_vram->end_pfn = begin_pfn + nr;
> -            d->arch.hvm_domain.dirty_vram = dirty_vram;
> +            p2m->dirty_vram = dirty_vram;
>              hap_vram_tracking_init(d);
>              rc = paging_log_dirty_enable(d);
>              if (rc != 0)
> @@ -171,7 +175,7 @@ int hap_track_dirty_vram(struct domain *
>          if ( paging_mode_log_dirty(d) && dirty_vram ) {
>              rc = paging_log_dirty_disable(d);
>              xfree(dirty_vram);
> -            dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
> +            dirty_vram = p2m->dirty_vram = NULL;
>          } else
>              rc = 0;
>      }
> @@ -182,7 +186,7 @@ param_fail:
>      if ( dirty_vram )
>      {
>          xfree(dirty_vram);
> -        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
> +        dirty_vram = p2m->dirty_vram = NULL;
>      }
>      return rc;
>  }
> @@ -228,12 +232,13 @@ static void hap_clean_dirty_bitmap(struc
>  
>  void hap_logdirty_init(struct domain *d)
>  {
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>      if ( paging_mode_log_dirty(d) && dirty_vram )
>      {
>          paging_log_dirty_disable(d);
>          xfree(dirty_vram);
> -        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
> +        dirty_vram = p2m->dirty_vram = NULL;
>      }
>  
>      /* Reinitialize logdirty mechanism */
> diff -r c1a95c7ef858 -r 50b3e6c73d7c xen/arch/x86/mm/shadow/common.c
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3211,11 +3211,11 @@ void shadow_teardown(struct domain *d)
>       * calls now that we've torn down the bitmap */
>      d->arch.paging.mode &= ~PG_log_dirty;
>  
> -    if (d->arch.hvm_domain.dirty_vram) {
> -        xfree(d->arch.hvm_domain.dirty_vram->sl1ma);
> -        xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap);
> -        xfree(d->arch.hvm_domain.dirty_vram);
> -        d->arch.hvm_domain.dirty_vram = NULL;
> +    if (p2m->dirty_vram) {
> +        xfree(p2m->dirty_vram->sl1ma);
> +        xfree(p2m->dirty_vram->dirty_bitmap);
> +        xfree(p2m->dirty_vram);
> +        p2m->dirty_vram = NULL;
>      }
>  
>      shadow_unlock(d);
> @@ -3559,8 +3559,8 @@ int shadow_track_dirty_vram(struct domai
>      int flush_tlb = 0;
>      unsigned long i;
>      p2m_type_t t;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    struct sh_dirty_vram *dirty_vram = p2m->dirty_vram;
>  
>      if (end_pfn < begin_pfn
>              || begin_pfn > p2m->max_mapped_pfn
> @@ -3574,11 +3574,12 @@ int shadow_track_dirty_vram(struct domai
>              || end_pfn   != dirty_vram->end_pfn )) )
>      {
>          /* Different tracking, tear the previous down. */
> -        gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n", dirty_vram->begin_pfn, dirty_vram->end_pfn);
> +        gdprintk(XENLOG_INFO, "stopping tracking VRAM %lx - %lx\n",
> +            dirty_vram->begin_pfn, dirty_vram->end_pfn);
>          xfree(dirty_vram->sl1ma);
>          xfree(dirty_vram->dirty_bitmap);
>          xfree(dirty_vram);
> -        dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
> +        dirty_vram = p2m->dirty_vram = NULL;
>      }
>  
>      if ( !nr )
> @@ -3602,7 +3603,7 @@ int shadow_track_dirty_vram(struct domai
>              goto out;
>          dirty_vram->begin_pfn = begin_pfn;
>          dirty_vram->end_pfn = end_pfn;
> -        d->arch.hvm_domain.dirty_vram = dirty_vram;
> +        p2m->dirty_vram = dirty_vram;
>  
>          if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL )
>              goto out_dirty_vram;
> @@ -3735,7 +3736,7 @@ out_sl1ma:
>      xfree(dirty_vram->sl1ma);
>  out_dirty_vram:
>      xfree(dirty_vram);
> -    dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
> +    dirty_vram = p2m->dirty_vram = NULL;
>  
>  out:
>      shadow_unlock(d);
> diff -r c1a95c7ef858 -r 50b3e6c73d7c xen/arch/x86/mm/shadow/multi.c
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -515,7 +515,7 @@ _sh_propagate(struct vcpu *v,
>      guest_l1e_t guest_entry = { guest_intpte };
>      shadow_l1e_t *sp = shadow_entry_ptr;
>      struct domain *d = v->domain;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram;
>      gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
>      u32 pass_thru_flags;
>      u32 gflags, sflags;
> @@ -1105,7 +1105,7 @@ static inline void shadow_vram_get_l1e(s
>      mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
>      int flags = shadow_l1e_get_flags(new_sl1e);
>      unsigned long gfn;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram;
>  
>      if ( !dirty_vram         /* tracking disabled? */
>           || !(flags & _PAGE_RW) /* read-only mapping? */
> @@ -1136,7 +1136,7 @@ static inline void shadow_vram_put_l1e(s
>      mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
>      int flags = shadow_l1e_get_flags(old_sl1e);
>      unsigned long gfn;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
> +    struct sh_dirty_vram *dirty_vram = p2m_get_hostp2m(d)->dirty_vram;
>  
>      if ( !dirty_vram         /* tracking disabled? */
>           || !(flags & _PAGE_RW) /* read-only mapping? */
> diff -r c1a95c7ef858 -r 50b3e6c73d7c xen/include/asm-x86/hvm/domain.h
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -69,9 +69,6 @@ struct hvm_domain {
>      /* Memory ranges with pinned cache attributes. */
>      struct list_head       pinned_cacheattr_ranges;
>  
> -    /* VRAM dirty support. */
> -    struct sh_dirty_vram *dirty_vram;
> -
>      /* If one of vcpus of this domain is in no_fill_mode or
>       * mtrr/pat between vcpus is not the same, set is_in_uc_mode
>       */
> diff -r c1a95c7ef858 -r 50b3e6c73d7c xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -172,6 +172,9 @@ struct p2m_domain {
>      /* Shadow translated domain: p2m mapping */
>      pagetable_t        phys_table;
>  
> +    /* VRAM dirty support. */
> +    struct sh_dirty_vram *dirty_vram;
> +
>      struct domain     *domain;   /* back pointer to domain */
>  
>      /* Pages used to construct the p2m */


-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, XenServer Engineering
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.