
Re: [PATCH v2 4/4] x86/shadow: refactor shadow_vram_{get,put}_l1e()


  • To: Jan Beulich <jbeulich@xxxxxxxx>
  • From: Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Date: Thu, 8 Oct 2020 17:15:56 +0200
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, "Andrew Cooper" <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>
  • Delivery-date: Thu, 08 Oct 2020 15:21:40 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On Wed, Sep 16, 2020 at 03:08:40PM +0200, Jan Beulich wrote:
> By passing the functions an MFN and flags, only a single instance of
                           ^ a
> each is needed; they were pretty large for being inline functions
> anyway.
> 
> While moving the code, also adjust coding style and add const where
> sensible / possible.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v2: New.
> 
> --- a/xen/arch/x86/mm/shadow/hvm.c
> +++ b/xen/arch/x86/mm/shadow/hvm.c
> @@ -903,6 +903,104 @@ int shadow_track_dirty_vram(struct domai
>      return rc;
>  }
>  
> +void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
> +                         mfn_t sl1mfn, const void *sl1e,
> +                         const struct domain *d)
> +{
> +    unsigned long gfn;
> +    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
> +
> +    ASSERT(is_hvm_domain(d));
> +
> +    if ( !dirty_vram /* tracking disabled? */ ||
> +         !(l1f & _PAGE_RW) /* read-only mapping? */ ||
> +         !mfn_valid(mfn) /* mfn can be invalid in mmio_direct */)
> +        return;
> +
> +    gfn = gfn_x(mfn_to_gfn(d, mfn));
> +    /* Page sharing not supported on shadow PTs */
> +    BUG_ON(SHARED_M2P(gfn));
> +
> +    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
> +    {
> +        unsigned long i = gfn - dirty_vram->begin_pfn;
> +        const struct page_info *page = mfn_to_page(mfn);
> +
> +        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
> +            /* Initial guest reference, record it */
> +            dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn) |
> +                                   PAGE_OFFSET(sl1e);
> +    }
> +}
> +
> +void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
> +                         mfn_t sl1mfn, const void *sl1e,
> +                         const struct domain *d)
> +{
> +    unsigned long gfn;
> +    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
> +
> +    ASSERT(is_hvm_domain(d));
> +
> +    if ( !dirty_vram /* tracking disabled? */ ||
> +         !(l1f & _PAGE_RW) /* read-only mapping? */ ||
> +         !mfn_valid(mfn) /* mfn can be invalid in mmio_direct */)
> +        return;
> +
> +    gfn = gfn_x(mfn_to_gfn(d, mfn));
> +    /* Page sharing not supported on shadow PTs */
> +    BUG_ON(SHARED_M2P(gfn));
> +
> +    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
> +    {
> +        unsigned long i = gfn - dirty_vram->begin_pfn;
> +        const struct page_info *page = mfn_to_page(mfn);
> +        bool dirty = false;
> +        paddr_t sl1ma = mfn_to_maddr(sl1mfn) | PAGE_OFFSET(sl1e);
> +
> +        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
> +        {
> +            /* Last reference */
> +            if ( dirty_vram->sl1ma[i] == INVALID_PADDR )
> +            {
> +                /* We didn't know it was that one, let's say it is dirty */
> +                dirty = true;
> +            }
> +            else
> +            {
> +                ASSERT(dirty_vram->sl1ma[i] == sl1ma);
> +                dirty_vram->sl1ma[i] = INVALID_PADDR;
> +                if ( l1f & _PAGE_DIRTY )
> +                    dirty = true;
> +            }
> +        }
> +        else
> +        {
> +            /* We had more than one reference, just consider the page dirty. */
> +            dirty = true;
> +            /* Check that it's not the one we recorded. */
> +            if ( dirty_vram->sl1ma[i] == sl1ma )
> +            {
> +                /* Too bad, we remembered the wrong one... */
> +                dirty_vram->sl1ma[i] = INVALID_PADDR;
> +            }
> +            else
> +            {
> +                /*
> +                 * Ok, our recorded sl1e is still pointing to this page, let's
> +                 * just hope it will remain.
> +                 */
> +            }
> +        }
> +
> +        if ( dirty )
> +        {
> +            dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);

Could you use _set_bit here?
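Something along these lines is what I have in mind (just a sketch, and
it assumes dirty_bitmap's storage is compatible with the bitops
helpers, which act on unsigned long):

    /* Non-atomic bitmap update, replacing the open-coded shift/or. */
    __set_bit(i, dirty_vram->dirty_bitmap);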

> +            dirty_vram->last_dirty = NOW();
> +        }
> +    }
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -1047,107 +1047,6 @@ static int shadow_set_l2e(struct domain
>      return flags;
>  }
>  
> -static inline void shadow_vram_get_l1e(shadow_l1e_t new_sl1e,
> -                                       shadow_l1e_t *sl1e,
> -                                       mfn_t sl1mfn,
> -                                       struct domain *d)
> -{
> -#ifdef CONFIG_HVM
> -    mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
> -    int flags = shadow_l1e_get_flags(new_sl1e);
> -    unsigned long gfn;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
> -
> -    if ( !is_hvm_domain(d) || !dirty_vram /* tracking disabled? */
> -         || !(flags & _PAGE_RW) /* read-only mapping? */
> -         || !mfn_valid(mfn) )   /* mfn can be invalid in mmio_direct */
> -        return;
> -
> -    gfn = gfn_x(mfn_to_gfn(d, mfn));
> -    /* Page sharing not supported on shadow PTs */
> -    BUG_ON(SHARED_M2P(gfn));
> -
> -    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
> -    {
> -        unsigned long i = gfn - dirty_vram->begin_pfn;
> -        struct page_info *page = mfn_to_page(mfn);
> -
> -        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
> -            /* Initial guest reference, record it */
> -            dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn)
> -                | ((unsigned long)sl1e & ~PAGE_MASK);
> -    }
> -#endif
> -}
> -
> -static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
> -                                       shadow_l1e_t *sl1e,
> -                                       mfn_t sl1mfn,
> -                                       struct domain *d)
> -{
> -#ifdef CONFIG_HVM
> -    mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
> -    int flags = shadow_l1e_get_flags(old_sl1e);
> -    unsigned long gfn;
> -    struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
> -
> -    if ( !is_hvm_domain(d) || !dirty_vram /* tracking disabled? */
> -         || !(flags & _PAGE_RW) /* read-only mapping? */
> -         || !mfn_valid(mfn) )   /* mfn can be invalid in mmio_direct */
> -        return;
> -
> -    gfn = gfn_x(mfn_to_gfn(d, mfn));
> -    /* Page sharing not supported on shadow PTs */
> -    BUG_ON(SHARED_M2P(gfn));
> -
> -    if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
> -    {
> -        unsigned long i = gfn - dirty_vram->begin_pfn;
> -        struct page_info *page = mfn_to_page(mfn);
> -        int dirty = 0;
> -        paddr_t sl1ma = mfn_to_maddr(sl1mfn)
> -            | ((unsigned long)sl1e & ~PAGE_MASK);
> -
> -        if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
> -        {
> -            /* Last reference */
> -            if ( dirty_vram->sl1ma[i] == INVALID_PADDR ) {
> -                /* We didn't know it was that one, let's say it is dirty */
> -                dirty = 1;
> -            }
> -            else
> -            {
> -                ASSERT(dirty_vram->sl1ma[i] == sl1ma);
> -                dirty_vram->sl1ma[i] = INVALID_PADDR;
> -                if ( flags & _PAGE_DIRTY )
> -                    dirty = 1;
> -            }
> -        }
> -        else
> -        {
> -            /* We had more than one reference, just consider the page dirty. */
> -            dirty = 1;
> -            /* Check that it's not the one we recorded. */
> -            if ( dirty_vram->sl1ma[i] == sl1ma )
> -            {
> -                /* Too bad, we remembered the wrong one... */
> -                dirty_vram->sl1ma[i] = INVALID_PADDR;
> -            }
> -            else
> -            {
> -            /* Ok, our recorded sl1e is still pointing to this page, let's
> -                 * just hope it will remain. */
> -            }
> -        }
> -        if ( dirty )
> -        {
> -            dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
> -            dirty_vram->last_dirty = NOW();
> -        }
> -    }
> -#endif
> -}
> -
>  static int shadow_set_l1e(struct domain *d,
>                            shadow_l1e_t *sl1e,
>                            shadow_l1e_t new_sl1e,
> @@ -1156,6 +1055,7 @@ static int shadow_set_l1e(struct domain
>  {
>      int flags = 0;
>      shadow_l1e_t old_sl1e;
> +    unsigned int old_sl1f;
>  #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
>      mfn_t new_gmfn = shadow_l1e_get_mfn(new_sl1e);
>  #endif
> @@ -1194,7 +1094,9 @@ static int shadow_set_l1e(struct domain
>                  new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
>                  /* fall through */
>              case 0:
> -                shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
> +                shadow_vram_get_mfn(shadow_l1e_get_mfn(new_sl1e),
> +                                    shadow_l1e_get_flags(new_sl1e),
> +                                    sl1mfn, sl1e, d);

As you have moved this function into a file that's only built when
CONFIG_HVM is enabled, don't you need to guard this call, or
alternatively provide a dummy handler for !CONFIG_HVM in private.h?

Same for shadow_vram_put_mfn.
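
For reference, a minimal sketch of the private.h approach I'm
suggesting (hypothetical; the prototypes are copied from your hvm.c
addition):

    #ifdef CONFIG_HVM
    void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
                             mfn_t sl1mfn, const void *sl1e,
                             const struct domain *d);
    void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
                             mfn_t sl1mfn, const void *sl1e,
                             const struct domain *d);
    #else
    /* Dirty VRAM tracking is HVM-only: make these no-ops for !CONFIG_HVM. */
    static inline void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
                                           mfn_t sl1mfn, const void *sl1e,
                                           const struct domain *d) {}
    static inline void shadow_vram_put_mfn(mfn_t mfn, unsigned int l1f,
                                           mfn_t sl1mfn, const void *sl1e,
                                           const struct domain *d) {}
    #endif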

Thanks, Roger.
