[Xen-changelog] [xen-unstable] [IA64] protect ridblock_owner.
# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1225171918 -32400
# Node ID ba163d6dc98618bdddfcb2a9507e1ae9b13fe754
# Parent ecfb1637cef9241f04d4f3ee6aac1e923537bef0
[IA64] protect ridblock_owner.
Protect ridblock_owner with a spin lock.
deallocate_rid_range() is called from arch_domain_destroy(), which
runs as an RCU callback.
allocate_rid_range(), on the other hand, is called from the domctl
hypercall, so accesses to ridblock_owner can race.
Protect them with a spin lock.
So far xend has probably serialized domain creation, so the race
has not caused visible problems.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
xen/arch/ia64/xen/regionreg.c | 16 +++++++++++-----
1 files changed, 11 insertions(+), 5 deletions(-)
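
As background, here is a minimal sketch of the unprotected pattern the
patch closes. The names (block_owner, alloc_block, free_block,
MAX_BLOCKS) are simplified, hypothetical stand-ins for the regionreg.c
code, not the real implementation:

    /* Shared ownership table, reachable from two contexts with no lock. */
    struct domain;                           /* opaque, as in Xen */
    #define MAX_BLOCKS 64
    static struct domain *block_owner[MAX_BLOCKS];

    /* Runs in domctl hypercall context (domain creation). */
    static int alloc_block(struct domain *d)
    {
        int i;

        for (i = 1; i < MAX_BLOCKS; i++) {
            if (block_owner[i] == NULL) {    /* check ...             */
                block_owner[i] = d;          /* ... then claim; the
                                                pair is not atomic    */
                return i;
            }
        }
        return 0;                            /* no free block */
    }

    /* Runs from an RCU callback (domain destruction). It can
     * interleave with the scan above, so the allocator may observe a
     * half-freed range, or both paths may update the table at once. */
    static void free_block(struct domain *d, int i)
    {
        block_owner[i] = NULL;
    }
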
diff -r ecfb1637cef9 -r ba163d6dc986 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c	Tue Oct 28 12:20:27 2008 +0900
+++ b/xen/arch/ia64/xen/regionreg.c	Tue Oct 28 14:31:58 2008 +0900
@@ -100,6 +100,7 @@ static unsigned long allocate_metaphysic
 
 static int implemented_rid_bits = 0;
 static int mp_rid_shift;
+static DEFINE_SPINLOCK(ridblock_lock);
 static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
 
 void __init init_rid_allocator (void)
@@ -169,6 +170,7 @@ int allocate_rid_range(struct domain *d,
 	n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);
 
 	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
+	spin_lock(&ridblock_lock);
 	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
 		if (ridblock_owner[i] == NULL) {
 			for (j = i; j < i + n_rid_blocks; ++j) {
@@ -182,16 +184,19 @@ int allocate_rid_range(struct domain *d,
 			break;
 		}
 	}
-
-	if (i >= MAX_RID_BLOCKS)
+
+	if (i >= MAX_RID_BLOCKS) {
+		spin_unlock(&ridblock_lock);
 		return 0;
-
+	}
+
 	// found an unused block:
 	// (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
 	// mark this block as owned
 	for (j = i; j < i + n_rid_blocks; ++j)
 		ridblock_owner[j] = d;
-
+	spin_unlock(&ridblock_lock);
+
 	// setup domain struct
 	d->arch.rid_bits = ridbits;
 	d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
@@ -221,11 +226,12 @@ int deallocate_rid_range(struct domain *
 	if (d->arch.rid_bits == 0)
 		return 1;
-
+	spin_lock(&ridblock_lock);
 	for (i = rid_block_start; i < rid_block_end; ++i) {
 		ASSERT(ridblock_owner[i] == d);
 		ridblock_owner[i] = NULL;
 	}
+	spin_unlock(&ridblock_lock);
 	d->arch.rid_bits = 0;
 	d->arch.starting_rid = 0;
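
With the patch applied, the same hypothetical sketch looks like this;
in particular, the early-return failure path has to drop the lock
before returning, which is what the new braces around the
i >= MAX_RID_BLOCKS case provide:

    /* Same hypothetical names as above; DEFINE_SPINLOCK, spin_lock,
     * spin_unlock and ASSERT are the Xen primitives the patch uses. */
    static DEFINE_SPINLOCK(owner_lock);

    static int alloc_block(struct domain *d)
    {
        int i;

        spin_lock(&owner_lock);
        for (i = 1; i < MAX_BLOCKS; i++) {
            if (block_owner[i] == NULL) {
                block_owner[i] = d;          /* check and claim under the lock */
                spin_unlock(&owner_lock);
                return i;
            }
        }
        spin_unlock(&owner_lock);            /* failure path unlocks too */
        return 0;
    }

    static void free_block(struct domain *d, int i)
    {
        spin_lock(&owner_lock);
        ASSERT(block_owner[i] == d);         /* owner must match, as in
                                                deallocate_rid_range() */
        block_owner[i] = NULL;
        spin_unlock(&owner_lock);
    }

The patch uses a plain spin_lock rather than an IRQ-safe variant,
presumably because neither path takes the lock from hard interrupt
context, and RID blocks change hands only at domain creation and
destruction, so a single global lock is cheap.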