[Xen-devel] [PATCH 1/2] x86/HVM: fix preemption handling in do_hvm_op() (try 2)
Just like previously done for some mem-op hypercalls, stop handling
preemption via the interface structures (which alters them in ways the
caller may not expect) and instead store the continuation point in the
high bits of the sub-operation argument.
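For illustration, a minimal standalone sketch of the scheme (simplified
names; this is not the actual Xen code): the sub-operation lives in the
low 8 bits of the hypercall argument, and the resume index is OR'ed into
the same word un-shifted. No shifting is needed because preemption is
only permitted at 256-iteration boundaries, which keeps the index's low
8 bits zero:

#include <assert.h>

#define OP_MASK 0xfful

/* Returns 0 when done, or the encoded argument for the continuation. */
static unsigned long process(unsigned long op, unsigned long nr)
{
    unsigned long start_iter = op & ~OP_MASK;   /* resume point, if any */

    op &= OP_MASK;                              /* sub-operation code */

    while ( nr > start_iter )
    {
        /* ... process item 'start_iter' of sub-operation 'op' ... */

        /* Preempt only at 256-item boundaries, never after the last item. */
        if ( nr > ++start_iter && !(start_iter & OP_MASK) )
        {
            /* Low bits are zero here, so OR-ing can't clobber the sub-op
             * (this mirrors the ASSERT added by the patch). */
            assert(!(start_iter & OP_MASK));
            return op | start_iter;
        }
    }
    return 0;
}

(In the patch proper, preempting additionally depends on
hypercall_preempt_check(), elided here.)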
This also changes the "nr" fields of struct xen_hvm_track_dirty_vram
(an operation already limited to 1GB worth of pages) and struct
xen_hvm_modified_memory to be only 32 bits wide, consistent with those
of struct xen_hvm_set_mem_{type,access}. If that's not acceptable for
some reason, we'd need to shrink HVMOP_op_bits (while still enforcing
the [then higher] limit resulting from the need to be able to encode
the continuation).
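A back-of-the-envelope check of both limits (assuming 4k pages, which
is what the 1GB figure above refers to; standalone, not Xen code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Dirty-VRAM tracking: at most 1GB worth of 4k pages. */
    uint64_t max_vram_pages = (1ull << 30) >> 12;        /* 262144 */

    /* Largest resume index a 32-bit "nr" can require, rounded to the
     * 256-iteration preemption granularity - it still fits alongside
     * the 8 sub-op bits even in a 32-bit (compat) hypercall argument. */
    uint32_t max_resume = UINT32_MAX & ~(uint32_t)0xff;  /* 0xffffff00 */

    printf("1GB of pages: %" PRIu64 "\n", max_vram_pages);
    printf("largest resume index: 0x%" PRIx32 "\n", max_resume);
    return 0;
}

So narrowing "nr" to 32 bits costs the dirty-VRAM operation nothing,
and even a maximal 32-bit "nr" remains encodable next to the op bits.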
Whether (and if so how) to adjust xc_hvm_track_dirty_vram(),
xc_hvm_modified_memory(), xc_hvm_set_mem_type(), and
xc_hvm_set_mem_access() to reflect the 32-bit restriction on "nr" is
unclear: if the APIs need to remain stable, all four functions should
probably check that there was no truncation; preferably, though, their
parameters would be changed to uint32_t or unsigned int.
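If the stable-API route is taken, the check could look as follows - a
hedged sketch using xc_hvm_modified_memory() as the example, with the
libxc hypercall-buffer boilerplate elided and the uint64_t parameter
width assumed from the current API:

#include <errno.h>
#include <stdint.h>

int xc_hvm_modified_memory(/* xc_interface *xch, */ uint32_t dom,
                           uint64_t first_pfn, uint64_t nr)
{
    /* The interface field is now only 32 bits wide: refuse to
     * silently truncate the caller's request. */
    if ( nr != (uint32_t)nr )
    {
        errno = EINVAL;
        return -1;
    }

    /* ... fill struct xen_hvm_modified_memory with .nr = nr and
     * issue HVMOP_modified_memory as before ... */
    return 0;
}

The same guard would go into the other three wrappers.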
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Avoid double increment of pfn in HVMOP_modified_memory and
HVMOP_set_mem_type handling. Change preemption logic to also work
for 32-bit callers. Drop unrelated cleanup.
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4047,13 +4047,16 @@ static int hvm_replace_event_channel(str
return 0;
}
+#define HVMOP_op_mask 0xff
+
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *curr_d = current->domain;
+ unsigned long start_iter = op & ~HVMOP_op_mask;
long rc = 0;
- switch ( op )
+ switch ( op &= HVMOP_op_mask )
{
case HVMOP_set_param:
case HVMOP_get_param:
@@ -4383,7 +4386,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
goto param_fail3;
rc = -EINVAL;
- if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ if ( a.nr < start_iter ||
+ ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
goto param_fail3;
@@ -4391,9 +4395,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
if ( !paging_mode_log_dirty(d) )
goto param_fail3;
- while ( a.nr > 0 )
+ while ( a.nr > start_iter )
{
- unsigned long pfn = a.first_pfn;
+ unsigned long pfn = a.first_pfn + start_iter;
struct page_info *page;
page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
@@ -4406,16 +4410,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
put_page(page);
}
- a.first_pfn++;
- a.nr--;
-
/* Check for continuation if it's not the last interation */
- if ( a.nr > 0 && hypercall_preempt_check() )
+ if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
+ hypercall_preempt_check() )
{
- if ( __copy_to_guest(arg, &a, 1) )
- rc = -EFAULT;
- else
- rc = -EAGAIN;
+ rc = -EAGAIN;
break;
}
}
@@ -4497,16 +4496,17 @@ long do_hvm_op(unsigned long op, XEN_GUE
goto param_fail4;
rc = -EINVAL;
- if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ if ( a.nr < start_iter ||
+ ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
goto param_fail4;
if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
goto param_fail4;
- while ( a.nr )
+ while ( a.nr > start_iter )
{
- unsigned long pfn = a.first_pfn;
+ unsigned long pfn = a.first_pfn + start_iter;
p2m_type_t t;
p2m_type_t nt;
mfn_t mfn;
@@ -4547,16 +4547,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
}
put_gfn(d, pfn);
- a.first_pfn++;
- a.nr--;
-
/* Check for continuation if it's not the last interation */
- if ( a.nr > 0 && hypercall_preempt_check() )
+ if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
+ hypercall_preempt_check() )
{
- if ( __copy_to_guest(arg, &a, 1) )
- rc = -EFAULT;
- else
- rc = -EAGAIN;
+ rc = -EAGAIN;
goto param_fail4;
}
}
@@ -4590,19 +4585,17 @@ long do_hvm_op(unsigned long op, XEN_GUE
rc = -EINVAL;
if ( (a.first_pfn != ~0ull) &&
- (((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ (a.nr < start_iter ||
+ ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
goto param_fail5;
- rc = p2m_set_mem_access(d, a.first_pfn, a.nr, a.hvmmem_access);
+ rc = p2m_set_mem_access(d, a.first_pfn, a.nr, start_iter,
+ HVMOP_op_mask, a.hvmmem_access);
if ( rc > 0 )
{
- a.first_pfn += a.nr - rc;
- a.nr = rc;
- if ( __copy_to_guest(arg, &a, 1) )
- rc = -EFAULT;
- else
- rc = -EAGAIN;
+ start_iter = rc;
+ rc = -EAGAIN;
}
param_fail5:
@@ -4751,8 +4744,11 @@ long do_hvm_op(unsigned long op, XEN_GUE
}
if ( rc == -EAGAIN )
- rc = hypercall_create_continuation(
- __HYPERVISOR_hvm_op, "lh", op, arg);
+ {
+ ASSERT(!(start_iter & HVMOP_op_mask));
+ rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
+ op | start_iter, arg);
+ }
return rc;
}
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1334,13 +1334,13 @@ void p2m_mem_access_resume(struct domain
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
- hvmmem_access_t access)
+ uint32_t start, uint32_t mask, hvmmem_access_t access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_access_t a, _a;
p2m_type_t t;
mfn_t mfn;
- long rc;
+ long rc = 0;
/* N.B. _not_ static: initializer depends on p2m->default_access */
p2m_access_t memaccess[] = {
@@ -1369,11 +1369,8 @@ long p2m_set_mem_access(struct domain *d
return 0;
}
- if ( !nr )
- return 0;
-
p2m_lock(p2m);
- for ( ; ; ++pfn )
+ for ( pfn += start; nr > start; ++pfn )
{
mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
@@ -1383,9 +1380,9 @@ long p2m_set_mem_access(struct domain *d
}
/* Check for continuation if it's not the last interation. */
- if ( !--nr || hypercall_preempt_check() )
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
{
- rc = nr;
+ rc = start;
break;
}
}
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -576,8 +576,8 @@ void p2m_mem_access_resume(struct domain
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
-long p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
- uint32_t nr, hvmmem_access_t access);
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, hvmmem_access_t access);
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -90,10 +90,10 @@ typedef enum {
struct xen_hvm_track_dirty_vram {
/* Domain to be tracked. */
domid_t domid;
+ /* Number of pages to track. */
+ uint32_t nr;
/* First pfn to track. */
uint64_aligned_t first_pfn;
- /* Number of pages to track. */
- uint64_aligned_t nr;
/* OUT variable. */
/* Dirty bitmap buffer. */
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
@@ -106,10 +106,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_di
struct xen_hvm_modified_memory {
/* Domain to be updated. */
domid_t domid;
+ /* Number of pages. */
+ uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
- /* Number of pages. */
- uint64_aligned_t nr;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
Attachment: x86-HVM-op-preemption.patch