[PATCH v2] memory: overlapping XENMAPSPACE_gmfn_range requests
Overlapping requests may need processing backwards (namely when the
destination range is above the source one), or else the intended effect
wouldn't be achieved: instead, some pages would be moved more than once.
With a small adjustment to the XEN_DMOP_relocate_memory handling, this
also covers that operation, where the potential issue was first noticed.
Fixes: a04811a315e0 ("mm: New XENMEM space, XENMAPSPACE_gmfn_range")
Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Of course an alternative would be to simply reject overlapping requests.
In that case, though, I think we would have to reject all overlaps, not
just the problematic ones. But since the code change didn't end up overly
intrusive, I thought I would go the "fix it" route first.
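
To illustrate the ordering issue, here is a minimal standalone sketch (not
Xen code; the array and the hypothetical move_pages() helper merely stand
in for the per-page GFN remapping). With an overlapping range whose
destination is above its source, a forward walk re-reads slots it has
already overwritten, whereas a backward walk yields the intended layout:

#include <stdio.h>

/*
 * Each "move" vacates the source slot, mimicking how XENMAPSPACE_gmfn_range
 * unmaps a page from its old GFN before mapping it at the new one.
 */
static void move_pages(unsigned int *gfn, unsigned int src,
                       unsigned int dst, unsigned int n)
{
    if ( dst > src && dst < src + n )
        /* Overlap with the destination above the source: go backwards. */
        for ( unsigned int i = n; i-- > 0; )
        {
            gfn[dst + i] = gfn[src + i];
            gfn[src + i] = 0;
        }
    else
        for ( unsigned int i = 0; i < n; ++i )
        {
            gfn[dst + i] = gfn[src + i];
            gfn[src + i] = 0;
        }
}

int main(void)
{
    unsigned int gfn[8] = { 1, 2, 3, 4 }; /* pages 1..4 at slots 0..3 */

    move_pages(gfn, 0, 2, 4); /* destination overlaps source */

    for ( unsigned int i = 0; i < 8; ++i )
        printf("%u ", gfn[i]);
    printf("\n");
    return 0;
}

This prints "0 0 1 2 3 4 0 0"; with only the forward loop it would print
"0 0 0 0 1 2 0 0" instead, pages 1 and 2 having been moved twice and pages
3 and 4 having been lost.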
---
v2: Adjust XEN_DMOP_relocate_memory handling for the working-backwards
case.
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -569,12 +569,16 @@ int dm_op(const struct dmop_args *op_arg
         rc = xenmem_add_to_physmap(d, &xatp, 0);
         if ( rc == 0 && data->size != xatp.size )
+        {
             rc = xatp.size;
+            xatp.idx += rc;
+            xatp.gpfn += rc;
+        }
 
         if ( rc > 0 )
         {
             data->size -= rc;
-            data->src_gfn += rc;
-            data->dst_gfn += rc;
+            data->src_gfn = xatp.idx;
+            data->dst_gfn = xatp.gpfn;
             const_op = false;
             rc = -ERESTART;
         }
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -849,7 +849,7 @@ int xenmem_add_to_physmap(struct domain
                           unsigned int start)
 {
     unsigned int done = 0;
-    long rc = 0;
+    long rc = 0, adjust = 1;
     union add_to_physmap_extra extra = {};
     struct page_info *pages[16];
 
@@ -884,8 +884,25 @@ int xenmem_add_to_physmap(struct domain
         return -EOVERFLOW;
     }
 
-    xatp->idx += start;
-    xatp->gpfn += start;
+    /*
+     * Overlapping ranges need processing backwards when the destination is
+     * above the source.
+     */
+    if ( xatp->gpfn > xatp->idx &&
+         unlikely(xatp->gpfn < xatp->idx + xatp->size) )
+    {
+        adjust = -1;
+
+        /* Both fields store "next item to process". */
+        xatp->idx += xatp->size - start - 1;
+        xatp->gpfn += xatp->size - start - 1;
+    }
+    else
+    {
+        xatp->idx += start;
+        xatp->gpfn += start;
+    }
+
     xatp->size -= start;
 
 #ifdef CONFIG_HAS_PASSTHROUGH
@@ -903,8 +920,8 @@ int xenmem_add_to_physmap(struct domain
         if ( rc < 0 )
             break;
 
-        xatp->idx++;
-        xatp->gpfn++;
+        xatp->idx += adjust;
+        xatp->gpfn += adjust;
 
         if ( extra.ppage )
             ++extra.ppage;
@@ -927,7 +944,10 @@ int xenmem_add_to_physmap(struct domain
 
         this_cpu(iommu_dont_flush_iotlb) = 0;
 
-        ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done,
+        if ( likely(adjust > 0) )
+            adjust = done;
+
+        ret = iommu_iotlb_flush(d, _dfn(xatp->idx - adjust), done,
                                 IOMMU_FLUSHF_modified);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
@@ -941,13 +961,26 @@ int xenmem_add_to_physmap(struct domain
         for ( i = 0; i < done; ++i )
             put_page(pages[i]);
 
-        ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done,
+        ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - adjust), done,
                                 IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
         if ( unlikely(ret) && rc >= 0 )
             rc = ret;
     }
 #endif
 
+    /*
+     * For internal callers (e.g. the XEN_DMOP_relocate_memory handling),
+     * leave the GFNs to resume from in *xatp; when we worked forwards they
+     * are correct already. The stored values must not be biased by a
+     * possible non-zero "start" that a subsequent invocation might use,
+     * hence do this updating only when the incoming "start" was 0.
+     */
+    if ( rc > 0 && !start && unlikely(adjust < 0) )
+    {
+        xatp->idx -= xatp->size - rc - 1;
+        xatp->gpfn -= xatp->size - rc - 1;
+    }
+
     return rc;
 }
 
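
For completeness, a standalone sketch of the "next item to process"
arithmetic in the working-backwards case (again not Xen code; plain
variables stand in for the xatp fields, and "start" is taken to be 0 as in
the DM op path). After a preemption with positive rc, idx and gpfn get
rewound to the base of the remaining lower part of the range, so the
caller can simply shrink the size by rc and retry, and the range ends up
being processed top-down across continuations:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long idx = 100, gpfn = 105; /* overlap: gpfn in [idx, idx+size) */
    unsigned long size = 8;
    unsigned long done = 3;              /* pretend preemption after 3 items */
    long adjust = -1;                    /* working backwards */

    /* Both fields initially designate the topmost item of the range. */
    idx += size - 1;
    gpfn += size - 1;

    for ( unsigned long i = 0; i < done; ++i )
    {
        /* xenmem_add_to_physmap_one() would move idx -> gpfn here. */
        idx += adjust;
        gpfn += adjust;
    }

    long rc = done; /* positive rc signals that a continuation is needed */

    /* The final fix-up from the patch (rc > 0, start == 0, adjust < 0). */
    idx -= size - rc - 1;
    gpfn -= size - rc - 1;

    /* Resume from the base of the remaining range, with size shrunk by rc. */
    assert(idx == 100 && gpfn == 105);
    printf("resume: idx=%lu gpfn=%lu size=%lu\n", idx, gpfn, size - rc);
    return 0;
}

The same "adjust" is reused for the IOMMU flushes: after a forward walk it
is set to done, making xatp->idx - adjust the first just-processed DFN,
while after a backward walk xatp->idx - adjust equals xatp->idx + 1, which
is again the lowest just-processed entry.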