
Re: [Xen-devel] [PATCH v2 5/7] x86/mm: split p2m ioreq server pages special handling into helper



> -----Original Message-----
> From: Roger Pau Monne [mailto:roger.pau@xxxxxxxxxx]
> Sent: 11 February 2019 17:47
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; George Dunlap
> <George.Dunlap@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Jun Nakajima
> <jun.nakajima@xxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>
> Subject: [PATCH v2 5/7] x86/mm: split p2m ioreq server pages special handling into helper
> 
> So that it can be shared by the ept, npt and shadow code, instead of
> duplicating it.
> 
> No change in functionality intended.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

LGTM.

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
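
For anyone skimming the archive without the tree to hand: the open-coded
counting this patch removes from ept_set_entry(), resolve_misconfig() and
do_recalc() all funnels into the new p2m_entry_modify() helper quoted near
the bottom. A condensed, standalone sketch of the counting pattern follows;
struct p2m_domain and p2m_type_t here are simplified stand-ins for
illustration, not Xen's real declarations:

#include <assert.h>
#include <stdio.h>

typedef enum { p2m_ram_rw, p2m_ioreq_server } p2m_type_t;

struct p2m_domain {
    struct {
        unsigned long entry_count; /* ioreq server 4k entries in the p2m */
    } ioreq;
};

/* Condensed form of the helper the patch adds: on a level 1 (4k) entry
 * change, bump the count when the new type is p2m_ioreq_server and drop
 * it when the old type was. */
static void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
                             p2m_type_t ot, unsigned int level)
{
    if ( level != 1 || nt == ot )
        return;

    if ( nt == p2m_ioreq_server )
        p2m->ioreq.entry_count++;

    if ( ot == p2m_ioreq_server )
    {
        assert(p2m->ioreq.entry_count > 0);
        p2m->ioreq.entry_count--;
    }
}

int main(void)
{
    struct p2m_domain p2m = { .ioreq = { .entry_count = 0 } };

    /* Install an ioreq server entry, then convert it back to plain RAM. */
    p2m_entry_modify(&p2m, p2m_ioreq_server, p2m_ram_rw, 1);
    p2m_entry_modify(&p2m, p2m_ram_rw, p2m_ioreq_server, 1);
    printf("entry_count = %lu\n", p2m.ioreq.entry_count); /* prints 0 */
    return 0;
}

Restricting the adjustment to level 1 is what lets a single helper serve
the ept, npt and shadow code alike, since p2m_ioreq_server is only ever
used on 4k mappings.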

> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Changes since v1:
>  - Remove unused p2mt_old from p2m_pt_set_entry.
> ---
>  xen/arch/x86/mm/hap/hap.c       |  3 ++
>  xen/arch/x86/mm/p2m-ept.c       | 55 +++++++++++----------------------
>  xen/arch/x86/mm/p2m-pt.c        | 24 --------------
>  xen/arch/x86/mm/shadow/common.c |  3 ++
>  xen/include/asm-x86/p2m.h       | 32 +++++++++++++++++++
>  5 files changed, 56 insertions(+), 61 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 3d651b94c3..dc46d5e14f 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -734,6 +734,9 @@ hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
>              && perms_strictly_increased(old_flags, l1e_get_flags(new)) );
>      }
> 
> +    p2m_entry_modify(p2m_get_hostp2m(d), p2m_flags_to_type(l1e_get_flags(new)),
> +                     p2m_flags_to_type(old_flags), level);
> +
>      safe_write_pte(p, new);
>      if ( old_flags & _PAGE_PRESENT )
>          flush_tlb_mask(d->dirty_cpumask);
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index bb562607f7..0ece6608cb 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -46,7 +46,8 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
>  }
> 
>  /* returns : 0 for success, -errno otherwise */
> -static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
> +static int atomic_write_ept_entry(struct p2m_domain *p2m,
> +                                  ept_entry_t *entryptr, ept_entry_t new,
>                                    int level)
>  {
>      int rc;
> @@ -89,6 +90,8 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
>      if ( unlikely(p2m_is_foreign(entryptr->sa_p2mt)) && check_foreign )
>          oldmfn = entryptr->mfn;
> 
> +    p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, level);
> +
>      write_atomic(&entryptr->epte, new.epte);
> 
>      if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
> @@ -390,7 +393,8 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
>   * present entries in the given page table, optionally marking the entries
>   * also for their subtrees needing P2M type re-calculation.
>   */
> -static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
> +static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn,
> +                                 bool_t recalc, int level)
>  {
>      int rc;
>      ept_entry_t *epte = map_domain_page(mfn);
> @@ -408,7 +412,7 @@ static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
>          e.emt = MTRR_NUM_TYPES;
>          if ( recalc )
>              e.recalc = 1;
> -        rc = atomic_write_ept_entry(&epte[i], e, level);
> +        rc = atomic_write_ept_entry(p2m, &epte[i], e, level);
>          ASSERT(rc == 0);
>          changed = 1;
>      }
> @@ -459,7 +463,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
>              rc = -ENOMEM;
>              goto out;
>          }
> -        wrc = atomic_write_ept_entry(&table[index], split_ept_entry, i);
> +        wrc = atomic_write_ept_entry(p2m, &table[index], split_ept_entry, i);
>          ASSERT(wrc == 0);
> 
>          for ( ; i > target; --i )
> @@ -479,7 +483,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
>          {
>              e.emt = MTRR_NUM_TYPES;
>              e.recalc = 1;
> -            wrc = atomic_write_ept_entry(&table[index], e, target);
> +            wrc = atomic_write_ept_entry(p2m, &table[index], e, target);
>              ASSERT(wrc == 0);
>              rc = 1;
>          }
> @@ -549,17 +553,11 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>                      nt = p2m_recalc_type(e.recalc, e.sa_p2mt, p2m, gfn + i);
>                      if ( nt != e.sa_p2mt )
>                      {
> -                        if ( e.sa_p2mt == p2m_ioreq_server )
> -                        {
> -                            ASSERT(p2m->ioreq.entry_count > 0);
> -                            p2m->ioreq.entry_count--;
> -                        }
> -
>                          e.sa_p2mt = nt;
>                          ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
>                      }
>                      e.recalc = 0;
> -                    wrc = atomic_write_ept_entry(&epte[i], e, level);
> +                    wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
>                      ASSERT(wrc == 0);
>                  }
>              }
> @@ -595,7 +593,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>                  {
>                      if ( ept_split_super_page(p2m, &e, level, level - 1) )
>                      {
> -                        wrc = atomic_write_ept_entry(&epte[i], e, level);
> +                        wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
>                          ASSERT(wrc == 0);
>                          unmap_domain_page(epte);
>                          mfn = e.mfn;
> @@ -610,7 +608,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>                  e.recalc = 0;
>                  if ( recalc && p2m_is_changeable(e.sa_p2mt) )
>                      ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
> -                wrc = atomic_write_ept_entry(&epte[i], e, level);
> +                wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
>                  ASSERT(wrc == 0);
>              }
> 
> @@ -621,11 +619,11 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
>          if ( e.emt == MTRR_NUM_TYPES )
>          {
>              ASSERT(is_epte_present(&e));
> -            ept_invalidate_emt(_mfn(e.mfn), e.recalc, level);
> +            ept_invalidate_emt(p2m, _mfn(e.mfn), e.recalc, level);
>              smp_wmb();
>              e.emt = 0;
>              e.recalc = 0;
> -            wrc = atomic_write_ept_entry(&epte[i], e, level);
> +            wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
>              ASSERT(wrc == 0);
>              unmap_domain_page(epte);
>              rc = 1;
> @@ -786,7 +784,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> 
>          /* now install the newly split ept sub-tree */
>          /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
> -        rc = atomic_write_ept_entry(ept_entry, split_ept_entry, i);
> +        rc = atomic_write_ept_entry(p2m, ept_entry, split_ept_entry, i);
>          ASSERT(rc == 0);
> 
>          /* then move to the level we want to make real changes */
> @@ -833,24 +831,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
>          new_entry.suppress_ve = is_epte_valid(&old_entry) ?
>                                      old_entry.suppress_ve : 1;
> 
> -    /*
> -     * p2m_ioreq_server is only used for 4K pages, so the
> -     * count is only done on ept page table entries.
> -     */
> -    if ( p2mt == p2m_ioreq_server )
> -    {
> -        ASSERT(i == 0);
> -        p2m->ioreq.entry_count++;
> -    }
> -
> -    if ( ept_entry->sa_p2mt == p2m_ioreq_server )
> -    {
> -        ASSERT(i == 0);
> -        ASSERT(p2m->ioreq.entry_count > 0);
> -        p2m->ioreq.entry_count--;
> -    }
> -
> -    rc = atomic_write_ept_entry(ept_entry, new_entry, target);
> +    rc = atomic_write_ept_entry(p2m, ept_entry, new_entry, target);
>      if ( unlikely(rc) )
>          old_entry.epte = 0;
>      else
> @@ -1070,7 +1051,7 @@ static void ept_change_entry_type_global(struct p2m_domain *p2m,
>      if ( !mfn )
>          return;
> 
> -    if ( ept_invalidate_emt(_mfn(mfn), 1, p2m->ept.wl) )
> +    if ( ept_invalidate_emt(p2m, _mfn(mfn), 1, p2m->ept.wl) )
>          ept_sync_domain(p2m);
>  }
> 
> @@ -1128,7 +1109,7 @@ static void ept_memory_type_changed(struct p2m_domain *p2m)
>      if ( !mfn )
>          return;
> 
> -    if ( ept_invalidate_emt(_mfn(mfn), 0, p2m->ept.wl) )
> +    if ( ept_invalidate_emt(p2m, _mfn(mfn), 0, p2m->ept.wl) )
>          ept_sync_domain(p2m);
>  }
> 
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 791600f6ba..fd6386b8fd 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -436,13 +436,6 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
>                  flags |= _PAGE_PSE;
>              }
> 
> -            if ( ot == p2m_ioreq_server )
> -            {
> -                ASSERT(p2m->ioreq.entry_count > 0);
> -                ASSERT(level == 0);
> -                p2m->ioreq.entry_count--;
> -            }
> -
>              e = l1e_from_pfn(mfn, flags);
>              p2m_add_iommu_flags(&e, level,
>                                  (nt == p2m_ram_rw)
> @@ -608,8 +601,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> 
>      if ( page_order == PAGE_ORDER_4K )
>      {
> -        p2m_type_t p2mt_old;
> -
>          rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
>                              L2_PAGETABLE_SHIFT - PAGE_SHIFT,
>                              L2_PAGETABLE_ENTRIES, 1, 1);
> @@ -633,21 +624,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
>          if ( entry_content.l1 != 0 )
>              p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
> 
> -        p2mt_old = p2m_flags_to_type(l1e_get_flags(*p2m_entry));
> -
> -        /*
> -         * p2m_ioreq_server is only used for 4K pages, so
> -         * the count is only done for level 1 entries.
> -         */
> -        if ( p2mt == p2m_ioreq_server )
> -            p2m->ioreq.entry_count++;
> -
> -        if ( p2mt_old == p2m_ioreq_server )
> -        {
> -            ASSERT(p2m->ioreq.entry_count > 0);
> -            p2m->ioreq.entry_count--;
> -        }
> -
>          /* level 1 entry */
>          p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
>          /* NB: paging_write_p2m_entry() handles tlb flushes properly */
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index c49aeb5e60..6d8a950054 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3180,6 +3180,9 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
>      if ( likely(d->arch.paging.shadow.total_pages != 0) )
>           sh_unshadow_for_p2m_change(d, gfn, p, new, level);
> 
> +    p2m_entry_modify(p2m_get_hostp2m(d), p2m_flags_to_type(l1e_get_flags(new)),
> +                     p2m_flags_to_type(l1e_get_flags(*p)), level);
> +
>      /* Update the entry with new content */
>      safe_write_pte(p, new);
> 
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 2095076556..834d49d2d4 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -932,6 +932,38 @@ int p2m_set_ioreq_server(struct domain *d, unsigned int flags,
>  struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
>                                                unsigned int *flags);
> 
> +static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
> +                                    p2m_type_t ot, unsigned int level)
> +{
> +    if ( level != 1 || nt == ot )
> +        return;
> +
> +    switch ( nt )
> +    {
> +    case p2m_ioreq_server:
> +        /*
> +         * p2m_ioreq_server is only used for 4K pages, so
> +         * the count is only done for level 1 entries.
> +         */
> +        p2m->ioreq.entry_count++;
> +        break;
> +
> +    default:
> +        break;
> +    }
> +
> +    switch ( ot )
> +    {
> +    case p2m_ioreq_server:
> +        ASSERT(p2m->ioreq.entry_count > 0);
> +        p2m->ioreq.entry_count--;
> +        break;
> +
> +    default:
> +        break;
> +    }
> +}
> +
>  #endif /* _XEN_ASM_X86_P2M_H */
> 
>  /*
> --
> 2.20.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

