[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v2 02/12] x86/p2m: {,un}map_mmio_regions() are HVM-only


  • To: Jan Beulich <jbeulich@xxxxxxxx>
  • From: Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Date: Thu, 29 Apr 2021 16:48:27 +0200
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=citrix.com; dmarc=pass action=none header.from=citrix.com; dkim=pass header.d=citrix.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=8Ts9uf6fqgeWYkiCVUmkIi2paOGwarj6Nlyuw81TH8Y=; b=XibxoVjY7h8qY92/SE7wNu5mpOa3Ibg+Gfc0ky+ScJKVTAclLa20SE4jPzsH9zDA6W+QeiwPvPeSGSh/Es8K/lasZX+NwbFeMCw2lWPhnGiO4ZsB8cs7zjSLiPErHqiZSxFHdnCWOSbGtqTIjgJOGwZdG4uSmLISJJUea0CqH/eCsu2sdA9QgLlbNm0INjA68aXYnCqNrX72AHYB6Nq89HOQi0PpLSPYk1fhke1jX7cfTh/8UU74y4R+sFfckQRAX1dthUuYC16CBVDWoW7wE5/RVvNynmx2mI8B/Qsr3AS9yYqaOEasguXMAeQUj0lAm2otrFPiD/T5hvynpeK5Jg==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=OAg/1RJr+GYM6XkerJ5xaWiQ+alps+Yrkm+/ZdNH4j6wMPP511DCVInnjHsZ39g81j43j4Yl5F4j9W9DCwHYXo1o3G59AixIzT2agGxz7VFb5xSH+SBDvodS/YHoekBWsmWyJsZ4t3SiGvTTx/QRg86LZ1sQfVflua+S/QP7uQqYjENKzHt0PCdXRFxqH+6163UNsbaobm1+YS9wx9S1hRl03g95CdHQZW5XoOdpVr+1KVdMkBEqNJI220ZwxNXJcTMoFJUM7P/D1zuhqEa18HdlIXMLfvGS3LWrd804cHv2RlqPQ6R9a95M16lyY9NUupJEwdZO1W8KVpHbQyFEUA==
  • Authentication-results: esa4.hc3370-68.iphmx.com; dkim=pass (signature verified) header.i=@citrix.onmicrosoft.com
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, "Andrew Cooper" <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Ian Jackson <iwj@xxxxxxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, "Stefano Stabellini" <sstabellini@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>
  • Delivery-date: Thu, 29 Apr 2021 14:48:41 +0000
  • Ironport-hdrordr: A9a23:IKQkzK1jWom5Bk+CmG5S6QqjBWByeYIsi2QD101hICF9WtCEls yogfQQ3QL1jjFUY307hdWcIsC7LE/035Qd2+ksFJ2lWxTrv3btEZF64eLZslndMgDd1soY76 dvdKBiFMb9ZGIRse/W6BS1euxO/PCp66at7N2x815IbSVHL55t9B14DAHzKDwSeCBjCYAiHJ SRouprzgDQGkg/VciwCnkbU+WrnbSi+K7OWhIaGwUhrDCHkDLA0tXHOiKF1RQTWS4n+8ZBzU H5lWXCh5mLgrWA5TL3k0TS6JlXn9WJ8Ko/OOW8zvI7Bx+ppgKhZIFKU6CPsjYvsIiUmSoXue iJmTgMFYBe7G7QY3GUrHLWqnbd+Qdr0VDO4xu5hmbuusPwTj5SMbs+uatpNiH3xmBlmfMU6t Mt40up86B5IDmFoQHGo//PbB1unlrcmwtYrccjy0ZxfKFbVKVctuUkjSVoOaZFJg3WwqY9Ht JjCcnNjcwmC2+yXjTism5jzMfEZAVLIj62BkwLusmi2zNLhnxOz0wB2MwFnnsbnahNM6V52w ==
  • Ironport-sdr: j6M3DB7Bbs+8es7ITWchEyYVbv1jP8VrAUkGn/BjMoKxmXNX818WYV9VwRNIJwlEK6QH42SG8K g/sU2AVculHLus3MZu7XUzTJkGOe8b+Zg5oAbXeG1AO6odBhURzKag5xE4kW7oPdeFNQBKWlp4 WezRtiWHTXTjdN45fokeXqDUW7p9ug52/KUqOY8Bax4CuVcyYG5hxJMDjK6Qr8FgC+juILM9gi n0BiM21AizN2nBmp1CP7JKAUcTh38BKRgSvxcQITAarbgM0ddGGNB3FV539bVpEf0D/Vf4I8Td rJs=
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On Mon, Apr 12, 2021 at 04:06:34PM +0200, Jan Beulich wrote:
> Mirror the "translated" check the functions do to do_domctl(), allowing
> the calls to be DCEd by the compiler. Add ASSERT_UNREACHABLE() to the
> original checks.
> 
> Also arrange for {set,clear}_mmio_p2m_entry() and
> {set,clear}_identity_p2m_entry() to respectively live next to each
> other, such that clear_mmio_p2m_entry() can also be covered by the
> #ifdef already covering set_mmio_p2m_entry().

Seeing the increase in HVM-specific regions, would it make sense to
consider splitting the HVM bits into p2m-hvm.c or some such?

> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v2: Fix build.
> ---
> Arguably the original checks, returning success, could also be dropped
> at this point.
> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1352,52 +1352,6 @@ int set_mmio_p2m_entry(struct domain *d,
>                                 p2m_get_hostp2m(d)->default_access);
>  }
>  
> -#endif /* CONFIG_HVM */
> -
> -int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
> -                           p2m_access_t p2ma, unsigned int flag)
> -{
> -    p2m_type_t p2mt;
> -    p2m_access_t a;
> -    gfn_t gfn = _gfn(gfn_l);
> -    mfn_t mfn;
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    int ret;
> -
> -    if ( !paging_mode_translate(p2m->domain) )
> -    {
> -        if ( !is_iommu_enabled(d) )
> -            return 0;
> -        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
> -                                1ul << PAGE_ORDER_4K,
> -                                IOMMUF_readable | IOMMUF_writable);
> -    }
> -
> -    gfn_lock(p2m, gfn, 0);
> -
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> -
> -    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
> -        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
> -                            p2m_mmio_direct, p2ma);
> -    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
> -        ret = 0;
> -    else
> -    {
> -        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
> -            ret = 0;
> -        else
> -            ret = -EBUSY;
> -        printk(XENLOG_G_WARNING
> -               "Cannot setup identity map d%d:%lx,"
> -               " gfn already mapped to %lx.\n",
> -               d->domain_id, gfn_l, mfn_x(mfn));
> -    }
> -
> -    gfn_unlock(p2m, gfn, 0);
> -    return ret;
> -}
> -
>  /*
>   * Returns:
>   *    0        for success
> @@ -1447,6 +1401,52 @@ int clear_mmio_p2m_entry(struct domain *
>      return rc;
>  }
>  
> +#endif /* CONFIG_HVM */
> +
> +int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
> +                           p2m_access_t p2ma, unsigned int flag)
> +{
> +    p2m_type_t p2mt;
> +    p2m_access_t a;
> +    gfn_t gfn = _gfn(gfn_l);
> +    mfn_t mfn;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    int ret;
> +
> +    if ( !paging_mode_translate(p2m->domain) )
> +    {
> +        if ( !is_iommu_enabled(d) )
> +            return 0;
> +        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
> +                                1ul << PAGE_ORDER_4K,
> +                                IOMMUF_readable | IOMMUF_writable);
> +    }
> +
> +    gfn_lock(p2m, gfn, 0);
> +
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> +
> +    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
> +        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
> +                            p2m_mmio_direct, p2ma);
> +    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
> +        ret = 0;
> +    else
> +    {
> +        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
> +            ret = 0;
> +        else
> +            ret = -EBUSY;
> +        printk(XENLOG_G_WARNING
> +               "Cannot setup identity map d%d:%lx,"
> +               " gfn already mapped to %lx.\n",
> +               d->domain_id, gfn_l, mfn_x(mfn));
> +    }
> +
> +    gfn_unlock(p2m, gfn, 0);
> +    return ret;
> +}
> +
>  int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
>  {
>      p2m_type_t p2mt;
> @@ -1892,6 +1892,8 @@ void *map_domain_gfn(struct p2m_domain *
>      return map_domain_page(*mfn);
>  }
>  
> +#ifdef CONFIG_HVM
> +
>  static unsigned int mmio_order(const struct domain *d,
>                                 unsigned long start_fn, unsigned long nr)
>  {
> @@ -1932,7 +1934,10 @@ int map_mmio_regions(struct domain *d,
>      unsigned int iter, order;
>  
>      if ( !paging_mode_translate(d) )
> +    {
> +        ASSERT_UNREACHABLE();
>          return 0;
> +    }
>  
>      for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
>            i += 1UL << order, ++iter )
> @@ -1964,7 +1969,10 @@ int unmap_mmio_regions(struct domain *d,
>      unsigned int iter, order;
>  
>      if ( !paging_mode_translate(d) )
> +    {
> +        ASSERT_UNREACHABLE();
>          return 0;

Maybe consider returning an error here now instead of silently
failing? This path is not supposed to be reached, so getting here
likely means something else has gone wrong, and it would be best to
report an error.

The rest LGTM:

Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Thanks, Roger.



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.