Re: [Xen-devel] [PATCH v6 06/11] xen, x86: factor out map and unmap from the memory_mapping DOMCTL
>>> On 21.04.14 at 15:44, <avanzini.arianna@xxxxxxxxx> wrote:
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -646,46 +646,45 @@ long arch_do_domctl(
> unsigned long gfn = domctl->u.memory_mapping.first_gfn;
> unsigned long mfn = domctl->u.memory_mapping.first_mfn;
> unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
> + unsigned long mfn_end = mfn + nr_mfns - 1;
> + unsigned long gfn_end = gfn + nr_mfns - 1;
> int add = domctl->u.memory_mapping.add_mapping;
> - unsigned long i;
>
> ret = -EINVAL;
> - if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
> - ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
> - (gfn + nr_mfns - 1) < gfn ) /* wrap? */
> - break;
> + if ( mfn_end < mfn || /* wrap? */
> + ((mfn | mfn_end) >> (paddr_bits - PAGE_SHIFT)) ||
> + gfn_end < gfn ) /* wrap? */
> + return ret;
>
> ret = -EPERM;
> - if ( !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) )
> + if ( !iomem_access_permitted(current->domain, mfn, mfn_end) )
> break;
>
> - ret = xsm_iomem_mapping(XSM_HOOK, d, mfn, mfn + nr_mfns - 1, add);
> + ret = xsm_iomem_mapping(XSM_HOOK, d, mfn, mfn_end, add);
> if ( ret )
> break;
>
> if ( add )
> {
> printk(XENLOG_G_INFO
> - "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> + "memory_map: add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> d->domain_id, gfn, mfn, nr_mfns);
>
> - ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
> - if ( !ret && paging_mode_translate(d) )
> + ret = iomem_permit_access(d, mfn, mfn_end);
> + if ( !ret )
> {
> - for ( i = 0; !ret && i < nr_mfns; i++ )
> - ret = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
> + ret = map_mmio_regions(d, gfn, gfn_end, mfn);
> if ( ret )
> {
> printk(XENLOG_G_WARNING
> - "memory_map:fail: dom%d gfn=%lx mfn=%lx
> ret:%ld\n",
> - d->domain_id, gfn + i, mfn + i, ret);
> - while ( i-- )
> - clear_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
> - if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
> + "memory_map: fail: dom%d gfn=%lx mfn=%lx
> ret:%ld\n",
> + d->domain_id, gfn, mfn, ret);
> + if ( iomem_deny_access(d, mfn, mfn_end) &&
> is_hardware_domain(current->domain) )
> printk(XENLOG_ERR
> - "memory_map: failed to deny dom%d access to
> [%lx,%lx]\n",
> - d->domain_id, mfn, mfn + nr_mfns - 1);
> + "memory_map: failed to deny dom%d access "
> + "to [%lx,%lx]\n",
Please don't split format strings like this unless they become _really_
long; keeping them on a single line ensures that grep-ing for any part
of the string will find it.
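For illustration, a sketch of how that printk() could keep the message
greppable on one source line (the wording is taken from the hunk above;
the argument list is assumed to match the surrounding mfn_end changes):

    /* Keep the format string on one source line so grep can find it. */
    printk(XENLOG_ERR
           "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
           d->domain_id, mfn, mfn_end);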
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1657,6 +1657,48 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
> return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
> }
>
> +int map_mmio_regions(struct domain *d,
> + unsigned long start_gfn,
> + unsigned long end_gfn,
> + unsigned long mfn)
The last parameter here should be mfn_t.
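A sketch of the prototype with that change (assuming the usual typesafe
mfn_t wrappers; the other parameter names are as in the patch):

    /* Sketch: take the starting machine frame as a typesafe mfn_t. */
    int map_mmio_regions(struct domain *d,
                         unsigned long start_gfn,
                         unsigned long end_gfn,
                         mfn_t mfn);

The domctl caller would then presumably pass _mfn(mfn), and arithmetic on
the frame number inside the function would go through mfn_x()/mfn_add()
style accessors rather than raw unsigned long math.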
> +int unmap_mmio_regions(struct domain *d,
> + unsigned long start_gfn,
> + unsigned long end_gfn,
> + unsigned long mfn)
> +{
> + int ret = 0;
> + unsigned long nr_mfns = end_gfn - start_gfn + 1;
> + unsigned long i;
> +
> + if ( !paging_mode_translate(d) )
> + return 0;
> +
> + for ( i = 0; i < nr_mfns; i++ )
> + ret |= !clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));
Elsewhere you updated to Mukesh's recent changes, yet this one is
certainly wrong now: clear_mmio_p2m_entry() now returns 0 on success.
When re-basing patches, you really should be careful whenever you have
to adjust code being removed in one place and added back in another: in
almost all cases you'll have to adjust both sides, even if the patch
applies fine without changes on the adding-back side.
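In other words, with clear_mmio_p2m_entry() returning 0 on success, the
loop would need something along these lines rather than OR-ing in the
negated result (a sketch only, not necessarily the exact fix wanted):

    for ( i = 0; i < nr_mfns; i++ )
    {
        int rc = clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));

        /* 0 now means success; remember the first error encountered. */
        if ( rc && !ret )
            ret = rc;
    }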
Also, now that you have this as a separate function, perhaps it
would be better to use it for the error recovery path in
map_mmio_regions()?
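That suggestion might look roughly like the fragment below inside
map_mmio_regions() (hypothetical; it assumes set_mmio_p2m_entry() also
returns 0 on success and that the map loop mirrors the unmap one):

    for ( i = 0; i < nr_mfns; i++ )
    {
        ret = set_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));
        if ( ret )
        {
            /* Roll back the entries established so far via the new helper. */
            if ( i )
                unmap_mmio_regions(d, start_gfn, start_gfn + i - 1, mfn);
            break;
        }
    }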
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel