diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 66a07bd..c0da115 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4593,17 +4593,17 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 }
 
 int xenmem_add_to_physmap(struct domain *d,
-                          struct xen_add_to_physmap xatp)
+                          struct xen_add_to_physmap *xatp)
 {
     struct page_info *page = NULL;
     unsigned long mfn = 0;
     unsigned long prev_mfn, gpfn;
     int rc;
 
-    switch ( xatp.space )
+    switch ( xatp->space )
     {
     case XENMAPSPACE_shared_info:
-        if ( xatp.idx == 0 )
+        if ( xatp->idx == 0 )
             mfn = virt_to_mfn(d->shared_info);
         break;
     case XENMAPSPACE_grant_table:
@@ -4613,20 +4613,20 @@ int xenmem_add_to_physmap(struct domain *d,
             d->grant_table->gt_version = 1;
 
         if ( d->grant_table->gt_version == 2 &&
-             (xatp.idx & XENMAPIDX_grant_table_status) )
+             (xatp->idx & XENMAPIDX_grant_table_status) )
         {
-            xatp.idx &= ~XENMAPIDX_grant_table_status;
-            if ( xatp.idx < nr_status_frames(d->grant_table) )
-                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+            xatp->idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp->idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp->idx]);
         }
         else
         {
-            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                 (xatp.idx < max_nr_grant_frames) )
-                gnttab_grow_table(d, xatp.idx + 1);
+            if ( (xatp->idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp->idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp->idx + 1);
 
-            if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
+            if ( xatp->idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp->idx]);
         }
 
         spin_unlock(&d->grant_table->lock);
@@ -4635,16 +4635,16 @@ int xenmem_add_to_physmap(struct domain *d,
     {
         p2m_type_t p2mt;
 
-        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        xatp->idx = mfn_x(gfn_to_mfn_unshare(d, xatp->idx, &p2mt));
         /* If the page is still shared, exit early */
         if ( p2m_is_shared(p2mt) )
         {
             rcu_unlock_domain(d);
             return -ENOMEM;
         }
-        if ( !get_page_from_pagenr(xatp.idx, d) )
+        if ( !get_page_from_pagenr(xatp->idx, d) )
             break;
-        mfn = xatp.idx;
+        mfn = xatp->idx;
         page = mfn_to_page(mfn);
         break;
     }
@@ -4666,15 +4666,15 @@ int xenmem_add_to_physmap(struct domain *d,
         put_page(page);
 
     /* Remove previously mapped page if it was present. */
-    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    prev_mfn = gmfn_to_mfn(d, xatp->gpfn);
     if ( mfn_valid(prev_mfn) )
     {
         if ( is_xen_heap_mfn(prev_mfn) )
             /* Xen heap frames are simply unhooked from this phys slot. */
-            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+            guest_physmap_remove_page(d, xatp->gpfn, prev_mfn, 0);
         else
             /* Normal domain memory is freed, to avoid leaking memory. */
-            guest_remove_page(d, xatp.gpfn);
+            guest_remove_page(d, xatp->gpfn);
     }
 
     /* Unmap from old location, if any. */
@@ -4684,7 +4684,7 @@ int xenmem_add_to_physmap(struct domain *d,
         guest_physmap_remove_page(d, gpfn, mfn, 0);
 
     /* Map at new location. */
-    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+    rc = guest_physmap_add_page(d, xatp->gpfn, mfn, 0);
 
     domain_unlock(d);
 
@@ -4715,7 +4715,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return -EPERM;
         }
 
-        rc = xenmem_add_to_physmap(d, xatp);
+        rc = xenmem_add_to_physmap(d, &xatp);
 
         rcu_unlock_domain(d);
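
Note on the change: the patch switches xenmem_add_to_physmap() from taking struct xen_add_to_physmap by value to taking a pointer. The caller in arch_memory_op() already holds a local copy of the hypercall argument (hence the new &xatp), presumably copied in from guest memory earlier in that function, so the pointer form avoids copying the whole structure again on each call, and in-place updates such as xatp->idx &= ~XENMAPIDX_grant_table_status now write straight into the caller's copy. A minimal standalone C sketch of the by-value vs. by-pointer difference follows; the struct below is a simplified stand-in, not the real public-header definition:

/*
 * Illustrative sketch only -- not part of the patch. A by-value callee
 * mutates a private copy that is discarded on return; a by-pointer
 * callee avoids the copy and its writes are visible to the caller.
 */
#include <stdio.h>

struct xatp_sketch {            /* simplified stand-in for xen_add_to_physmap */
    unsigned int  space;
    unsigned long idx;
    unsigned long gpfn;
};

static void by_value(struct xatp_sketch xatp)
{
    xatp.idx &= ~0x80000000UL;  /* change is lost when the function returns */
}

static void by_pointer(struct xatp_sketch *xatp)
{
    xatp->idx &= ~0x80000000UL; /* change persists in the caller's struct */
}

int main(void)
{
    struct xatp_sketch xatp = { 0, 0x80000005UL, 0 };

    by_value(xatp);
    printf("after by_value:   idx = %#lx\n", xatp.idx);  /* 0x80000005 */

    by_pointer(&xatp);
    printf("after by_pointer: idx = %#lx\n", xatp.idx);  /* 0x5 */
    return 0;
}

Either form is equally safe against the guest here, since the argument has already been copied out of guest memory before the call; the pointer form simply trades the per-call struct copy for a single indirection.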