# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1193933785 0
# Node ID 338f3c34e65605d9beb96176ef1a71c1262dbf14
# Parent adefbadab27ce0c83cd58fe4216c3d3521235366
x86: Fix various problems with debug-register handling.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
xen/arch/x86/acpi/suspend.c | 17 ++----
xen/arch/x86/domain.c | 42 ++++++++--------
xen/arch/x86/hvm/svm/svm.c | 70 ++++++++++++---------------
xen/arch/x86/hvm/svm/vmcb.c | 2
xen/arch/x86/hvm/vmx/vmx.c | 69 ++++++++++++--------------
xen/arch/x86/traps.c | 95 +++++++++++++++++++------------------
xen/include/asm-x86/hvm/svm/vmcb.h | 7 --
xen/include/asm-x86/processor.h | 9 +++
8 files changed, 156 insertions(+), 155 deletions(-)
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/acpi/suspend.c Thu Nov 01 16:16:25 2007 +0000
@@ -29,9 +29,6 @@ void save_rest_processor_state(void)
#endif
}
-#define loaddebug(_v,_reg) \
- __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-
void restore_rest_processor_state(void)
{
int cpu = smp_processor_id();
@@ -54,15 +51,15 @@ void restore_rest_processor_state(void)
#endif
/* Maybe load the debug registers. */
+ BUG_ON(is_hvm_vcpu(v));
if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
{
- loaddebug(&v->arch.guest_context, 0);
- loaddebug(&v->arch.guest_context, 1);
- loaddebug(&v->arch.guest_context, 2);
- loaddebug(&v->arch.guest_context, 3);
- /* no 4 and 5 */
- loaddebug(&v->arch.guest_context, 6);
- loaddebug(&v->arch.guest_context, 7);
+ write_debugreg(0, v->arch.guest_context.debugreg[0]);
+ write_debugreg(1, v->arch.guest_context.debugreg[1]);
+ write_debugreg(2, v->arch.guest_context.debugreg[2]);
+ write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ write_debugreg(6, v->arch.guest_context.debugreg[6]);
+ write_debugreg(7, v->arch.guest_context.debugreg[7]);
}
/* Reload FPU state on next FPU use. */
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/domain.c Thu Nov 01 16:16:25 2007 +0000
@@ -687,13 +687,13 @@ int arch_set_info_guest(
v->arch.guest_context.ctrlreg[4] =
(cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
- if ( v->is_initialised )
- goto out;
-
memset(v->arch.guest_context.debugreg, 0,
sizeof(v->arch.guest_context.debugreg));
for ( i = 0; i < 8; i++ )
(void)set_debugreg(v, i, c(debugreg[i]));
+
+ if ( v->is_initialised )
+ goto out;
if ( v->vcpu_id == 0 )
d->vm_assist = c(vm_assist);
@@ -1210,6 +1210,15 @@ static void paravirt_ctxt_switch_from(st
static void paravirt_ctxt_switch_from(struct vcpu *v)
{
save_segments(v);
+
+ /*
+ * Disable debug breakpoints. We do this aggressively because if we switch
+ * to an HVM guest we may load DR0-DR3 with values that can cause #DE
+ * inside Xen, before we get a chance to reload DR7, and this cannot always
+ * safely be handled.
+ */
+ if ( unlikely(v->arch.guest_context.debugreg[7]) )
+ write_debugreg(7, 0);
}
static void paravirt_ctxt_switch_to(struct vcpu *v)
@@ -1219,10 +1228,17 @@ static void paravirt_ctxt_switch_to(stru
if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
write_cr4(v->arch.guest_context.ctrlreg[4]);
-}
-
-#define loaddebug(_v,_reg) \
- asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
+
+ if ( unlikely(v->arch.guest_context.debugreg[7]) )
+ {
+ write_debugreg(0, v->arch.guest_context.debugreg[0]);
+ write_debugreg(1, v->arch.guest_context.debugreg[1]);
+ write_debugreg(2, v->arch.guest_context.debugreg[2]);
+ write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ write_debugreg(6, v->arch.guest_context.debugreg[6]);
+ write_debugreg(7, v->arch.guest_context.debugreg[7]);
+ }
+}
static void __context_switch(void)
{
@@ -1248,18 +1264,6 @@ static void __context_switch(void)
memcpy(stack_regs,
&n->arch.guest_context.user_regs,
CTXT_SWITCH_STACK_BYTES);
-
- /* Maybe switch the debug registers. */
- if ( unlikely(n->arch.guest_context.debugreg[7]) )
- {
- loaddebug(&n->arch.guest_context, 0);
- loaddebug(&n->arch.guest_context, 1);
- loaddebug(&n->arch.guest_context, 2);
- loaddebug(&n->arch.guest_context, 3);
- /* no 4 and 5 */
- loaddebug(&n->arch.guest_context, 6);
- loaddebug(&n->arch.guest_context, 7);
- }
n->arch.ctxt_switch_to(n);
}
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Nov 01 16:16:25 2007 +0000
@@ -137,12 +137,6 @@ static enum handler_return long_mode_do_
return HNDL_done;
}
-
-#define loaddebug(_v,_reg) \
- asm volatile ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
- asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -152,26 +146,45 @@ static void svm_save_dr(struct vcpu *v)
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
v->arch.hvm_vcpu.flag_dr_dirty = 0;
- v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
-
- savedebug(&v->arch.guest_context, 0);
- savedebug(&v->arch.guest_context, 1);
- savedebug(&v->arch.guest_context, 2);
- savedebug(&v->arch.guest_context, 3);
+ v->arch.hvm_svm.vmcb->dr_intercepts = ~0u;
+
+ v->arch.guest_context.debugreg[0] = read_debugreg(0);
+ v->arch.guest_context.debugreg[1] = read_debugreg(1);
+ v->arch.guest_context.debugreg[2] = read_debugreg(2);
+ v->arch.guest_context.debugreg[3] = read_debugreg(3);
v->arch.guest_context.debugreg[6] = vmcb->dr6;
v->arch.guest_context.debugreg[7] = vmcb->dr7;
}
-
static void __restore_debug_registers(struct vcpu *v)
{
- loaddebug(&v->arch.guest_context, 0);
- loaddebug(&v->arch.guest_context, 1);
- loaddebug(&v->arch.guest_context, 2);
- loaddebug(&v->arch.guest_context, 3);
- /* DR6 and DR7 are loaded from the VMCB. */
-}
-
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
+ v->arch.hvm_vcpu.flag_dr_dirty = 1;
+ vmcb->dr_intercepts = 0;
+
+ write_debugreg(0, v->arch.guest_context.debugreg[0]);
+ write_debugreg(1, v->arch.guest_context.debugreg[1]);
+ write_debugreg(2, v->arch.guest_context.debugreg[2]);
+ write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ vmcb->dr6 = v->arch.guest_context.debugreg[6];
+ vmcb->dr7 = v->arch.guest_context.debugreg[7];
+}
+
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints is enabled. So mask out all bits that don't
+ * enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0xff
+
+static void svm_restore_dr(struct vcpu *v)
+{
+ if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+ __restore_debug_registers(v);
+}
int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
@@ -351,9 +364,6 @@ int svm_vmcb_restore(struct vcpu *v, str
vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
}
- vmcb->dr6 = c->dr6;
- vmcb->dr7 = c->dr7;
-
if ( c->pending_valid )
{
gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
@@ -419,12 +429,6 @@ static int svm_load_vmcb_ctxt(struct vcp
}
return 0;
-}
-
-static void svm_restore_dr(struct vcpu *v)
-{
- if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
- __restore_debug_registers(v);
}
static enum hvm_intblk svm_interrupt_blocked(
@@ -1147,16 +1151,8 @@ static void set_reg(
static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
HVMTRACE_0D(DR_WRITE, v);
-
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
-
__restore_debug_registers(v);
-
- /* allow the guest full access to the debug registers */
- vmcb->dr_intercepts = 0;
}
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c Thu Nov 01 16:16:25 2007 +0000
@@ -130,7 +130,7 @@ static int construct_vmcb(struct vcpu *v
GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP;
/* Intercept all debug-register writes. */
- vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
+ vmcb->dr_intercepts = ~0u;
/* Intercept all control-register accesses except for CR2 and CR8. */
vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Nov 01 16:16:25 2007 +0000
@@ -381,11 +381,6 @@ static enum handler_return long_mode_do_
#endif /* __i386__ */
-#define loaddebug(_v,_reg) \
- __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
-#define savedebug(_v,_reg) \
- __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-
static int vmx_guest_x86_mode(struct vcpu *v)
{
unsigned int cs_ar_bytes;
@@ -411,23 +406,41 @@ static void vmx_save_dr(struct vcpu *v)
v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
__vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
- savedebug(&v->arch.guest_context, 0);
- savedebug(&v->arch.guest_context, 1);
- savedebug(&v->arch.guest_context, 2);
- savedebug(&v->arch.guest_context, 3);
- savedebug(&v->arch.guest_context, 6);
+ v->arch.guest_context.debugreg[0] = read_debugreg(0);
+ v->arch.guest_context.debugreg[1] = read_debugreg(1);
+ v->arch.guest_context.debugreg[2] = read_debugreg(2);
+ v->arch.guest_context.debugreg[3] = read_debugreg(3);
+ v->arch.guest_context.debugreg[6] = read_debugreg(6);
+ /* DR7 must be saved as it is used by vmx_restore_dr(). */
v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
}
static void __restore_debug_registers(struct vcpu *v)
{
- loaddebug(&v->arch.guest_context, 0);
- loaddebug(&v->arch.guest_context, 1);
- loaddebug(&v->arch.guest_context, 2);
- loaddebug(&v->arch.guest_context, 3);
- /* No 4 and 5 */
- loaddebug(&v->arch.guest_context, 6);
+ ASSERT(!v->arch.hvm_vcpu.flag_dr_dirty);
+ v->arch.hvm_vcpu.flag_dr_dirty = 1;
+
+ write_debugreg(0, v->arch.guest_context.debugreg[0]);
+ write_debugreg(1, v->arch.guest_context.debugreg[1]);
+ write_debugreg(2, v->arch.guest_context.debugreg[2]);
+ write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ write_debugreg(6, v->arch.guest_context.debugreg[6]);
/* DR7 is loaded from the VMCS. */
+}
+
+/*
+ * DR7 is saved and restored on every vmexit. Other debug registers only
+ * need to be restored if their value is going to affect execution -- i.e.,
+ * if one of the breakpoints is enabled. So mask out all bits that don't
+ * enable some breakpoint functionality.
+ */
+#define DR7_ACTIVE_MASK 0xff
+
+static void vmx_restore_dr(struct vcpu *v)
+{
+ /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+ if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
+ __restore_debug_registers(v);
}
void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
@@ -701,21 +714,6 @@ static int vmx_load_vmcs_ctxt(struct vcp
}
return 0;
-}
-
-/*
- * DR7 is saved and restored on every vmexit. Other debug registers only
- * need to be restored if their value is going to affect execution -- i.e.,
- * if one of the breakpoints is enabled. So mask out all bits that don't
- * enable some breakpoint functionality.
- */
-#define DR7_ACTIVE_MASK 0xff
-
-static void vmx_restore_dr(struct vcpu *v)
-{
- /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
- if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
- __restore_debug_registers(v);
}
static void vmx_ctxt_switch_from(struct vcpu *v)
@@ -1322,15 +1320,12 @@ static void vmx_dr_access(unsigned long
HVMTRACE_0D(DR_WRITE, v);
- v->arch.hvm_vcpu.flag_dr_dirty = 1;
-
- /* We could probably be smarter about this */
- __restore_debug_registers(v);
+ if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ __restore_debug_registers(v);
/* Allow guest direct access to DR registers */
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.exec_control);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
}
/*
diff -r adefbadab27c -r 338f3c34e656 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/arch/x86/traps.c Thu Nov 01 16:16:25 2007 +0000
@@ -2493,50 +2493,44 @@ asmlinkage int do_device_not_available(s
asmlinkage int do_debug(struct cpu_user_regs *regs)
{
- unsigned long condition;
struct vcpu *v = current;
- asm volatile ( "mov %%db6,%0" : "=r" (condition) );
-
- /* Mask out spurious debug traps due to lazy DR7 setting */
- if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
- (v->arch.guest_context.debugreg[7] == 0) )
- {
- asm volatile ( "mov %0,%%db7" : : "r" (0UL) );
+ DEBUGGER_trap_entry(TRAP_debug, regs);
+
+ if ( !guest_mode(regs) )
+ {
+ if ( regs->eflags & EF_TF )
+ {
+#ifdef __x86_64__
+ void sysenter_entry(void);
+ void sysenter_eflags_saved(void);
+ /* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
+ if ( (regs->rip >= (unsigned long)sysenter_entry) &&
+ (regs->rip < (unsigned long)sysenter_eflags_saved) )
+ goto out;
+ WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
+#else
+ WARN_ON(1);
+#endif
+ regs->eflags &= ~EF_TF;
+ }
+ else
+ {
+ /*
+ * We ignore watchpoints when they trigger within Xen. This may
+ * happen when a buffer is passed to us which previously had a
+ * watchpoint set on it. No need to bump EIP; the only faulting
+ * trap is an instruction breakpoint, which can't happen to us.
+ */
+ WARN_ON(!search_exception_table(regs->eip));
+ }
goto out;
}
- DEBUGGER_trap_entry(TRAP_debug, regs);
-
- if ( !guest_mode(regs) )
- {
-#ifdef __x86_64__
- void sysenter_entry(void);
- void sysenter_eflags_saved(void);
- /* In SYSENTER entry path we cannot zap TF until EFLAGS is saved. */
- if ( (regs->rip >= (unsigned long)sysenter_entry) &&
- (regs->rip < (unsigned long)sysenter_eflags_saved) )
- goto out;
- WARN_ON(regs->rip != (unsigned long)sysenter_eflags_saved);
-#else
- WARN_ON(1);
-#endif
- /* Clear TF just for absolute sanity. */
- regs->eflags &= ~EF_TF;
- /*
- * We ignore watchpoints when they trigger within Xen. This may happen
- * when a buffer is passed to us which previously had a watchpoint set
- * on it. No need to bump EIP; the only faulting trap is an instruction
- * breakpoint, which can't happen to us.
- */
- goto out;
- }
-
/* Save debug status register where guest OS can peek at it */
- v->arch.guest_context.debugreg[6] = condition;
+ v->arch.guest_context.debugreg[6] = read_debugreg(6);
ler_enable();
-
return do_guest_trap(TRAP_debug, regs, 0);
out:
@@ -2750,25 +2744,25 @@ long set_debugreg(struct vcpu *v, int re
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( v == curr )
- asm volatile ( "mov %0, %%db0" : : "r" (value) );
+ write_debugreg(0, value);
break;
case 1:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( v == curr )
- asm volatile ( "mov %0, %%db1" : : "r" (value) );
+ write_debugreg(1, value);
break;
case 2:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( v == curr )
- asm volatile ( "mov %0, %%db2" : : "r" (value) );
+ write_debugreg(2, value);
break;
case 3:
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
if ( v == curr )
- asm volatile ( "mov %0, %%db3" : : "r" (value) );
+ write_debugreg(3, value);
break;
case 6:
/*
@@ -2778,7 +2772,7 @@ long set_debugreg(struct vcpu *v, int re
value &= 0xffffefff; /* reserved bits => 0 */
value |= 0xffff0ff0; /* reserved bits => 1 */
if ( v == curr )
- asm volatile ( "mov %0, %%db6" : : "r" (value) );
+ write_debugreg(6, value);
break;
case 7:
/*
@@ -2797,9 +2791,22 @@ long set_debugreg(struct vcpu *v, int re
if ( (value & (1<<13)) != 0 ) return -EPERM;
for ( i = 0; i < 16; i += 2 )
if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
- }
- if ( v == current )
- asm volatile ( "mov %0, %%db7" : : "r" (value) );
+ /*
+ * If DR7 was previously clear then we need to load all other
+ * debug registers at this point as they were not restored during
+ * context switch.
+ */
+ if ( (v == curr) && (v->arch.guest_context.debugreg[7] == 0) )
+ {
+ write_debugreg(0, v->arch.guest_context.debugreg[0]);
+ write_debugreg(1, v->arch.guest_context.debugreg[1]);
+ write_debugreg(2, v->arch.guest_context.debugreg[2]);
+ write_debugreg(3, v->arch.guest_context.debugreg[3]);
+ write_debugreg(6, v->arch.guest_context.debugreg[6]);
+ }
+ }
+ if ( v == curr )
+ write_debugreg(7, value);
break;
default:
return -EINVAL;
diff -r adefbadab27c -r 338f3c34e656 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Thu Nov 01 16:16:25 2007 +0000
@@ -150,13 +150,6 @@ enum DRInterceptBits
DR_INTERCEPT_DR14_WRITE = 1 << 30,
DR_INTERCEPT_DR15_WRITE = 1 << 31,
};
-
-/* for lazy save/restore we'd like to intercept all DR writes */
-#define DR_INTERCEPT_ALL_WRITES \
- (DR_INTERCEPT_DR0_WRITE|DR_INTERCEPT_DR1_WRITE|DR_INTERCEPT_DR2_WRITE \
- |DR_INTERCEPT_DR3_WRITE|DR_INTERCEPT_DR4_WRITE|DR_INTERCEPT_DR5_WRITE \
- |DR_INTERCEPT_DR6_WRITE|DR_INTERCEPT_DR7_WRITE)
-
enum VMEXIT_EXITCODE
{
diff -r adefbadab27c -r 338f3c34e656 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Thu Nov 01 10:56:56 2007 +0000
+++ b/xen/include/asm-x86/processor.h Thu Nov 01 16:16:25 2007 +0000
@@ -481,6 +481,15 @@ long set_gdt(struct vcpu *d,
unsigned long *frames,
unsigned int entries);
+#define write_debugreg(reg, val) do { \
+ unsigned long __val = val; \
+ asm volatile ( "mov %0,%%db" #reg : : "r" (__val) ); \
+} while (0)
+#define read_debugreg(reg) ({ \
+ unsigned long __val; \
+ asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) ); \
+ __val; \
+})
long set_debugreg(struct vcpu *p, int reg, unsigned long value);
struct microcode_header {
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog