Re: [Xen-devel] [PATCH] use clear_domain_page() instead of open coding it



On 19/10/15 15:51, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

mm bits:

Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
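
For anyone skimming the patch: the helper being switched to lives in
xen/include/xen/domain_page.h and is essentially the exact sequence it
replaces, folded into one call (sketch from memory, modulo details):

    static inline void clear_domain_page(mfn_t mfn)
    {
        void *ptr = map_domain_page(mfn);

        clear_page(ptr);
        unmap_domain_page(ptr);
    }

So each hunk below is intended as a no-functional-change cleanup.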

> 
> --- a/xen/arch/x86/hvm/stdvga.c
> +++ b/xen/arch/x86/hvm/stdvga.c
> @@ -552,8 +552,7 @@ void stdvga_init(struct domain *d)
>  {
>      struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
>      struct page_info *pg;
> -    void *p;
> -    int i;
> +    unsigned int i;
>  
>      memset(s, 0, sizeof(*s));
>      spin_lock_init(&s->lock);
> @@ -564,9 +563,7 @@ void stdvga_init(struct domain *d)
>          if ( pg == NULL )
>              break;
>          s->vram_page[i] = pg;
> -        p = __map_domain_page(pg);
> -        clear_page(p);
> -        unmap_domain_page(p);
> +        clear_domain_page(_mfn(page_to_mfn(pg)));
>      }
>  
>      if ( i == ARRAY_SIZE(s->vram_page) )
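
(The _mfn() wrapper is needed here because the common page_to_mfn()
returns a raw unsigned long, while clear_domain_page() takes the
typesafe mfn_t. Roughly what the typesafe wrapper looks like in debug
builds -- release builds typedef mfn_t back to a plain integer:

    /* Boxing the value in a struct stops raw longs and mfn_t from
     * being mixed up silently. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long n) { return (mfn_t) { n }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }
)
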
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -68,7 +68,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>      if ( cpu_has_vmx_vmcs_shadowing )
>      {
>          struct page_info *vmread_bitmap, *vmwrite_bitmap;
> -        unsigned long *vr, *vw;
> +        unsigned long *vw;
>  
>          vmread_bitmap = alloc_domheap_page(NULL, 0);
>          if ( !vmread_bitmap )
> @@ -78,6 +78,8 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>          }
>          v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
>  
> +        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
> +
>          vmwrite_bitmap = alloc_domheap_page(NULL, 0);
>          if ( !vmwrite_bitmap )
>          {
> @@ -86,10 +88,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>          }
>          v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
>  
> -        vr = __map_domain_page(vmread_bitmap);
>          vw = __map_domain_page(vmwrite_bitmap);
> -
> -        clear_page(vr);
>          clear_page(vw);
>  
>          /*
> @@ -101,7 +100,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
>          set_bit(IO_BITMAP_B, vw);
>          set_bit(VMCS_HIGH(IO_BITMAP_B), vw);
>  
> -        unmap_domain_page(vr);
>          unmap_domain_page(vw);
>      }
>  
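
(Worth noting why only the vmread side moves to the helper: the
vmwrite bitmap's mapping is reused immediately afterwards to set
individual bits, so keeping the open-coded map/clear/unmap there
avoids mapping the page twice. The post-patch flow, in sketch form:

    /* vmread bitmap only ever stays all-zero: clear it without
     * keeping a mapping around. */
    clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));

    /* vmwrite bitmap: the mapping is reused for set_bit(), so one
     * map/clear/set/unmap sequence is cheaper than mapping twice. */
    vw = __map_domain_page(vmwrite_bitmap);
    clear_page(vw);
    set_bit(IO_BITMAP_B, vw);
    set_bit(VMCS_HIGH(IO_BITMAP_B), vw);
    unmap_domain_page(vw);
)
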
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1907,7 +1907,7 @@ p2m_flush_table(struct p2m_domain *p2m)
>  {
>      struct page_info *top, *pg;
>      struct domain *d = p2m->domain;
> -    void *p;
> +    mfn_t mfn;
>  
>      p2m_lock(p2m);
>  
> @@ -1928,15 +1928,14 @@ p2m_flush_table(struct p2m_domain *p2m)
>      p2m->np2m_base = P2M_BASE_EADDR;
>      
>      /* Zap the top level of the trie */
> -    top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
> -    p = __map_domain_page(top);
> -    clear_page(p);
> -    unmap_domain_page(p);
> +    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
> +    clear_domain_page(mfn);
>  
>      /* Make sure nobody else is using this p2m table */
>      nestedhvm_vmcx_flushtlb(p2m);
>  
>      /* Free the rest of the trie pages back to the paging pool */
> +    top = mfn_to_page(mfn);
>      while ( (pg = page_list_remove_head(&p2m->pages)) )
>          if ( pg != top ) 
>              d->arch.paging.free_page(d, pg);
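
(The mfn is latched before the top level is zapped so that the
mfn_to_page() lookup -- needed only so the free loop can skip the top
page -- can be deferred to the one place that uses it:

    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
    clear_domain_page(mfn);
    ...
    top = mfn_to_page(mfn);   /* only needed for the pg != top check */

mfn_to_page() takes the typesafe mfn_t directly here because p2m.c
overrides the common macro; see the note after the shadow hunk below.)
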
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -78,12 +78,10 @@ static mfn_t paging_new_log_dirty_page(s
>  static mfn_t paging_new_log_dirty_leaf(struct domain *d)
>  {
>      mfn_t mfn = paging_new_log_dirty_page(d);
> +
>      if ( mfn_valid(mfn) )
> -    {
> -        void *leaf = map_domain_page(mfn);
> -        clear_page(leaf);
> -        unmap_domain_page(leaf);
> -    }
> +        clear_domain_page(mfn);
> +
>      return mfn;
>  }
>  
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -1437,8 +1437,7 @@ mfn_t shadow_alloc(struct domain *d,
>      unsigned int pages = shadow_size(shadow_type);
>      struct page_list_head tmp_list;
>      cpumask_t mask;
> -    void *p;
> -    int i;
> +    unsigned int i;
>  
>      ASSERT(paging_locked_by_me(d));
>      ASSERT(shadow_type != SH_type_none);
> @@ -1484,10 +1483,7 @@ mfn_t shadow_alloc(struct domain *d,
>              flush_tlb_mask(&mask);
>          }
>          /* Now safe to clear the page for reuse */
> -        p = __map_domain_page(sp);
> -        ASSERT(p != NULL);
> -        clear_page(p);
> -        unmap_domain_page(p);
> +        clear_domain_page(page_to_mfn(sp));
>          INIT_PAGE_LIST_ENTRY(&sp->list);
>          page_list_add(sp, &tmp_list);
>          sp->u.sh.type = shadow_type;
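
(Note the asymmetry with the stdvga and vvmx hunks: no _mfn() wrapper
here. The shadow code overrides the common macros with typesafe
variants in xen/arch/x86/mm/shadow/private.h, roughly:

    /* Override macros from asm/page.h to make them work with mfn_t. */
    #undef mfn_to_page
    #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
    #undef page_to_mfn
    #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))

p2m.c does the same for mfn_to_page(), which is why the p2m hunk can
pass mfn_t around without conversion.)
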
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1959,22 +1959,16 @@ __initcall(pagealloc_keyhandler_init);
>  
>  void scrub_one_page(struct page_info *pg)
>  {
> -    void *p;
> -
>      if ( unlikely(pg->count_info & PGC_broken) )
>          return;
>  
> -    p = __map_domain_page(pg);
> -
>  #ifndef NDEBUG
>      /* Avoid callers relying on allocations returning zeroed pages. */
> -    memset(p, 0xc2, PAGE_SIZE);
> +    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
>  #else
>      /* For a production build, clear_page() is the fastest way to scrub. */
> -    clear_page(p);
> +    clear_domain_page(_mfn(page_to_mfn(pg)));
>  #endif
> -
> -    unmap_domain_page(p);
>  }
>  
>  static void dump_heap(unsigned char key)
> 
> 
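
One last detail that may not be obvious at first glance: the debug
branch of scrub_one_page() now chains the whole poison sequence into a
single statement. That works because memset() returns its first
argument, so

    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));

is exactly equivalent to the old three-line form:

    void *p = __map_domain_page(pg);
    memset(p, 0xc2, PAGE_SIZE);
    unmap_domain_page(p);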


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel