# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 94b10faa7577ed73cea698a3cc723e9e75404172
# Parent 64f8906399927210b80580335e303ed7ddc437f3
Lower-casify some macros, and rename kernel_mode() to guest_kernel_mode().
Fix the macro so that it evaluates false if the given register context is
not a valid guest context.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/dom0_ops.c Wed Mar 8 14:02:43 2006
@@ -458,7 +458,7 @@
{
memcpy(c, &v->arch.guest_context, sizeof(*c));
- if ( HVM_DOMAIN(v) )
+ if ( hvm_guest(v) )
{
hvm_store_cpu_guest_regs(v, &c->user_regs);
hvm_store_cpu_guest_ctrl_regs(v, c->ctrlreg);
@@ -473,9 +473,9 @@
c->flags = 0;
if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
c->flags |= VGCF_I387_VALID;
- if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
+ if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
- if ( HVM_DOMAIN(v) )
+ if ( hvm_guest(v) )
c->flags |= VGCF_HVM_GUEST;
c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/domain.c Wed Mar 8 14:02:43 2006
@@ -719,7 +719,7 @@
stack_regs,
CTXT_SWITCH_STACK_BYTES);
unlazy_fpu(p);
- if ( !HVM_DOMAIN(p) )
+ if ( !hvm_guest(p) )
{
save_segments(p);
}
@@ -748,7 +748,7 @@
loaddebug(&n->arch.guest_context, 7);
}
- if ( !HVM_DOMAIN(n) )
+ if ( !hvm_guest(n) )
{
set_int80_direct_trap(n);
switch_kernel_stack(n, cpu);
@@ -812,7 +812,7 @@
/* Re-enable interrupts before restoring state which may fault. */
local_irq_enable();
- if ( !HVM_DOMAIN(next) )
+ if ( !hvm_guest(next) )
{
load_LDT(next);
load_segments(next);
@@ -1030,7 +1030,7 @@
v->arch.guest_table_user = mk_pagetable(0);
}
- if ( HVM_DOMAIN(v) )
+ if ( hvm_guest(v) )
hvm_relinquish_guest_resources(v);
}
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/hvm/hvm.c Wed Mar 8 14:02:43 2006
@@ -186,7 +186,7 @@
{
struct hvm_domain *platform;
- if ( !HVM_DOMAIN(current) || (current->vcpu_id != 0) )
+ if ( !hvm_guest(current) || (current->vcpu_id != 0) )
return;
shadow_direct_map_init(d);
@@ -324,7 +324,7 @@
int rc = 0;
/* current must be HVM domain BSP */
- if ( !(HVM_DOMAIN(bsp) && bsp->vcpu_id == 0) ) {
+ if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
printk("Not calling hvm_bringup_ap from BSP context.\n");
domain_crash_synchronous();
}
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/hvm/vioapic.c Wed Mar 8 14:02:43 2006
@@ -52,7 +52,7 @@
s->flags &= ~IOAPIC_ENABLE_FLAG;
}
-#ifdef HVM_DOMAIN_SAVE_RESTORE
+#ifdef hvm_guest_SAVE_RESTORE
void ioapic_save(QEMUFile* f, void* opaque)
{
printk("no implementation for ioapic_save\n");
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Mar 8 14:02:43 2006
@@ -79,7 +79,7 @@
{
struct vcpu *v = (struct vcpu *)info;
- ASSERT(HVM_DOMAIN(v));
+ ASSERT(hvm_guest(v));
if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
__vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
@@ -87,7 +87,7 @@
void vmx_request_clear_vmcs(struct vcpu *v)
{
- ASSERT(HVM_DOMAIN(v));
+ ASSERT(hvm_guest(v));
if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
__vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/traps.c Wed Mar 8 14:02:43 2006
@@ -132,10 +132,10 @@
int i;
unsigned long *stack, addr;
- if ( HVM_DOMAIN(current) )
+ if ( hvm_guest(current) )
return;
- if ( VM86_MODE(regs) )
+ if ( vm86_mode(regs) )
{
stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff));
printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n ",
@@ -254,7 +254,7 @@
unsigned long *stack = ESP_BEFORE_EXCEPTION(regs), addr;
int i;
- if ( GUEST_MODE(regs) )
+ if ( guest_mode(regs) )
return show_guest_stack(regs);
printk("Xen stack trace from "__OP"sp=%p:\n ", stack);
@@ -333,7 +333,7 @@
DEBUGGER_trap_entry(trapnr, regs);
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
goto xen_fault;
ti = &current->arch.guest_context.trap_ctxt[trapnr];
@@ -399,7 +399,7 @@
DEBUGGER_trap_entry(TRAP_int3, regs);
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
{
DEBUGGER_trap_fatal(TRAP_int3, regs);
show_registers(regs);
@@ -433,7 +433,7 @@
/* Re-set error_code.user flag appropriately for the guest. */
error_code &= ~4;
- if ( !KERNEL_MODE(v, guest_cpu_user_regs()) )
+ if ( !guest_kernel_mode(v, guest_cpu_user_regs()) )
error_code |= 4;
ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
@@ -474,7 +474,7 @@
if ( unlikely(ret == 0) )
{
/* In hypervisor mode? Leave it to the #PF handler to fix up. */
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
return 0;
/* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
propagate_page_fault(
@@ -506,7 +506,7 @@
if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
{
- if ( shadow_mode_external(d) && GUEST_MODE(regs) )
+ if ( shadow_mode_external(d) && guest_mode(regs) )
return shadow_fault(addr, regs);
if ( (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
return handle_gdt_ldt_mapping_fault(
@@ -528,7 +528,7 @@
return EXCRET_fault_fixed;
}
- if ( KERNEL_MODE(v, regs) &&
+ if ( guest_kernel_mode(v, regs) &&
/* Protection violation on write? No reserved-bit violation? */
((regs->error_code & 0xb) == 0x3) &&
ptwr_do_page_fault(d, addr, regs) )
@@ -564,7 +564,7 @@
if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
return rc;
- if ( unlikely(!GUEST_MODE(regs)) )
+ if ( unlikely(!guest_mode(regs)) )
{
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
@@ -620,7 +620,7 @@
#define TOGGLE_MODE() ((void)0)
#endif
- if ( v->arch.iopl >= (KERNEL_MODE(v, regs) ? 1 : 3) )
+ if ( v->arch.iopl >= (guest_kernel_mode(v, regs) ? 1 : 3) )
return 1;
if ( v->arch.iobmp_limit > (port + bytes) )
@@ -849,7 +849,7 @@
case 0xfa: /* CLI */
case 0xfb: /* STI */
- if ( v->arch.iopl < (KERNEL_MODE(v, regs) ? 1 : 3) )
+ if ( v->arch.iopl < (guest_kernel_mode(v, regs) ? 1 : 3) )
goto fail;
/*
* This is just too dangerous to allow, in my opinion. Consider if the
@@ -868,7 +868,7 @@
}
/* Remaining instructions only emulated from guest kernel. */
- if ( !KERNEL_MODE(v, regs) )
+ if ( !guest_kernel_mode(v, regs) )
goto fail;
/* Privileged (ring 0) instructions. */
@@ -1070,7 +1070,7 @@
if ( regs->error_code & 1 )
goto hardware_gp;
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
goto gp_in_kernel;
/*
@@ -1097,7 +1097,7 @@
{
/* This fault must be due to <INT n> instruction. */
ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
- if ( PERMIT_SOFTINT(TI_GET_DPL(ti), v, regs) )
+ if ( permit_softint(TI_GET_DPL(ti), v, regs) )
{
tb->flags = TBF_EXCEPTION;
regs->eip += 2;
@@ -1305,7 +1305,7 @@
DEBUGGER_trap_entry(TRAP_debug, regs);
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
{
/* Clear TF just for absolute sanity. */
regs->eflags &= ~EF_TF;
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/x86_32/domain_page.c Wed Mar 8 14:02:43 2006
@@ -28,7 +28,7 @@
* then it means we are running on the idle domain's page table and must
* therefore use its mapcache.
*/
- if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) )
+ if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
{
/* If we really are idling, perform lazy context switch now. */
if ( (v = idle_vcpu[smp_processor_id()]) == current )
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/x86_32/seg_fixup.c Wed Mar 8 14:02:43 2006
@@ -280,7 +280,7 @@
int gs_override = 0;
/* WARNING: We only work for ring-3 segments. */
- if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
+ if ( unlikely(vm86_mode(regs)) || unlikely(!ring_3(regs)) )
{
DPRINTK("Taken fault at bad CS %04x\n", regs->cs);
goto fail;
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/x86_32/traps.c Wed Mar 8 14:02:43 2006
@@ -24,7 +24,7 @@
char taint_str[TAINT_STRING_MAX_LEN];
const char *context;
- if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
+ if ( hvm_guest(current) && guest_mode(regs) )
{
context = "hvm";
hvm_store_cpu_guest_regs(current, &fault_regs);
@@ -32,9 +32,9 @@
}
else
{
- context = GUEST_MODE(regs) ? "guest" : "hypervisor";
-
- if ( !GUEST_MODE(regs) )
+ context = guest_mode(regs) ? "guest" : "hypervisor";
+
+ if ( !guest_mode(regs) )
{
fault_regs.esp = (unsigned long)&regs->esp;
fault_regs.ss = read_segment_register(ss);
@@ -53,7 +53,7 @@
print_tainted(taint_str));
printk("CPU: %d\nEIP: %04x:[<%08x>]",
smp_processor_id(), fault_regs.cs, fault_regs.eip);
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
print_symbol(" %s", fault_regs.eip);
printk("\nEFLAGS: %08x CONTEXT: %s\n", fault_regs.eflags, context);
printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
@@ -172,17 +172,17 @@
regs->esp += 4;
regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
- if ( VM86_MODE(regs) )
+ if ( vm86_mode(regs) )
{
/* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
domain_crash_synchronous();
}
- else if ( unlikely(RING_0(regs)) )
- {
- domain_crash_synchronous();
- }
- else if ( !RING_1(regs) )
+ else if ( unlikely(ring_0(regs)) )
+ {
+ domain_crash_synchronous();
+ }
+ else if ( !ring_1(regs) )
{
/* Return to ring 2/3: pop and restore ESP and SS. */
if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
diff -r 64f890639992 -r 94b10faa7577 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c Wed Mar 8 10:54:48 2006
+++ b/xen/arch/x86/x86_64/traps.c Wed Mar 8 14:02:43 2006
@@ -24,7 +24,7 @@
char taint_str[TAINT_STRING_MAX_LEN];
const char *context;
- if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
+ if ( hvm_guest(current) && guest_mode(regs) )
{
context = "hvm";
hvm_store_cpu_guest_regs(current, &fault_regs);
@@ -32,7 +32,7 @@
}
else
{
- context = GUEST_MODE(regs) ? "guest" : "hypervisor";
+ context = guest_mode(regs) ? "guest" : "hypervisor";
fault_crs[0] = read_cr0();
fault_crs[3] = read_cr3();
fault_regs.ds = read_segment_register(ds);
@@ -46,7 +46,7 @@
print_tainted(taint_str));
printk("CPU: %d\nRIP: %04x:[<%016lx>]",
smp_processor_id(), fault_regs.cs, fault_regs.rip);
- if ( !GUEST_MODE(regs) )
+ if ( !guest_mode(regs) )
print_symbol(" %s", fault_regs.rip);
printk("\nRFLAGS: %016lx CONTEXT: %s\n", fault_regs.rflags, context);
printk("rax: %016lx rbx: %016lx rcx: %016lx\n",
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/debugger.h
--- a/xen/include/asm-x86/debugger.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/debugger.h Wed Mar 8 14:02:43 2006
@@ -88,7 +88,7 @@
{
struct vcpu *v = current;
- if ( KERNEL_MODE(v, regs) &&
+ if ( guest_kernel_mode(v, regs) &&
test_bit(_DOMF_debugging, &v->domain->domain_flags) &&
((vector == TRAP_int3) || (vector == TRAP_debug)) )
{
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/hvm/domain.h Wed Mar 8 14:02:43 2006
@@ -19,8 +19,8 @@
*
*/
-#ifndef __ASM_X86_HVM_DOMAIN_H__
-#define __ASM_X86_HVM_DOMAIN_H__
+#ifndef __ASM_X86_hvm_guest_H__
+#define __ASM_X86_hvm_guest_H__
#include <asm/e820.h>
#include <asm/hvm/vpic.h>
@@ -49,5 +49,5 @@
char pbuf[HVM_PBUF_SIZE];
};
-#endif /* __ASM_X86_HVM_DOMAIN_H__ */
+#endif /* __ASM_X86_hvm_guest_H__ */
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/hvm/support.h Wed Mar 8 14:02:43 2006
@@ -32,7 +32,7 @@
#define HVM_DEBUG 0
#endif
-#define HVM_DOMAIN(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
+#define hvm_guest(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
static inline shared_iopage_t *get_sp(struct domain *d)
{
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/hvm/vioapic.h
--- a/xen/include/asm-x86/hvm/vioapic.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/hvm/vioapic.h Wed Mar 8 14:02:43 2006
@@ -116,7 +116,7 @@
void ioapic_update_EOI(struct domain *d, int vector);
-#ifdef HVM_DOMAIN_SAVE_RESTORE
+#ifdef hvm_guest_SAVE_RESTORE
void ioapic_save(QEMUFile* f, void* opaque);
int ioapic_load(QEMUFile* f, void* opaque, int version_id);
#endif
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/regs.h
--- a/xen/include/asm-x86/regs.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/regs.h Wed Mar 8 14:02:43 2006
@@ -31,17 +31,17 @@
EF_ID = 0x00200000, /* id */
};
-#define GUEST_MODE(r) \
+#define guest_mode(r) \
({ \
unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r); \
/* Frame pointer must point into current CPU stack. */ \
ASSERT(diff < STACK_SIZE); \
/* If a guest frame, it must be have guest privs (unless HVM guest). */ \
/* We permit CS==0 which can come from an uninitialised trap entry. */ \
- ASSERT((diff != 0) || VM86_MODE(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) || \
- (r->cs == 0) || HVM_DOMAIN(current)); \
+ ASSERT((diff != 0) || vm86_mode(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) || \
+ (r->cs == 0) || hvm_guest(current)); \
/* If not a guest frame, it must be a hypervisor frame. */ \
- ASSERT((diff == 0) || (!VM86_MODE(r) && (r->cs == __HYPERVISOR_CS))); \
+ ASSERT((diff == 0) || (!vm86_mode(r) && (r->cs == __HYPERVISOR_CS))); \
/* Return TRUE if it's a guest frame. */ \
(diff == 0); \
})
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/shadow.h Wed Mar 8 14:02:43 2006
@@ -1646,7 +1646,7 @@
|| (va >= HYPERVISOR_VIRT_END)
#endif
) &&
- KERNEL_MODE(v, regs) )
+ guest_kernel_mode(v, regs) )
return 1;
return 0;
@@ -1700,7 +1700,7 @@
struct domain *d = v->domain;
int paging_enabled;
- if ( HVM_DOMAIN(v) )
+ if ( hvm_guest(v) )
paging_enabled = hvm_paging_enabled(v);
else
// HACK ALERT: there's currently no easy way to figure out if a domU
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/x86_32/regs.h
--- a/xen/include/asm-x86/x86_32/regs.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/x86_32/regs.h Wed Mar 8 14:02:43 2006
@@ -4,16 +4,17 @@
#include <xen/types.h>
#include <public/xen.h>
-#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
-#define RING_0(_r) (((_r)->cs & 3) == 0)
-#define RING_1(_r) (((_r)->cs & 3) == 1)
-#define RING_2(_r) (((_r)->cs & 3) == 2)
-#define RING_3(_r) (((_r)->cs & 3) == 3)
+#define vm86_mode(r) ((r)->eflags & EF_VM)
+#define ring_0(r) (((r)->cs & 3) == 0)
+#define ring_1(r) (((r)->cs & 3) == 1)
+#define ring_2(r) (((r)->cs & 3) == 2)
+#define ring_3(r) (((r)->cs & 3) == 3)
-#define KERNEL_MODE(_e, _r) (!VM86_MODE(_r) && RING_1(_r))
+#define guest_kernel_mode(v, r) \
+ (!vm86_mode(r) && ring_1(r))
-#define PERMIT_SOFTINT(_dpl, _e, _r) \
- ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3)))
+#define permit_softint(dpl, v, r) \
+ ((dpl) >= (vm86_mode(r) ? 3 : ((r)->cs & 3)))
/* Number of bytes of on-stack execution state to be context-switched. */
#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
diff -r 64f890639992 -r 94b10faa7577 xen/include/asm-x86/x86_64/regs.h
--- a/xen/include/asm-x86/x86_64/regs.h Wed Mar 8 10:54:48 2006
+++ b/xen/include/asm-x86/x86_64/regs.h Wed Mar 8 14:02:43 2006
@@ -4,16 +4,17 @@
#include <xen/types.h>
#include <public/xen.h>
-#define VM86_MODE(_r) (0) /* No VM86 support in long mode. */
-#define RING_0(_r) (((_r)->cs & 3) == 0)
-#define RING_1(_r) (((_r)->cs & 3) == 1)
-#define RING_2(_r) (((_r)->cs & 3) == 2)
-#define RING_3(_r) (((_r)->cs & 3) == 3)
+#define vm86_mode(r) (0) /* No VM86 support in long mode. */
+#define ring_0(r) (((r)->cs & 3) == 0)
+#define ring_1(r) (((r)->cs & 3) == 1)
+#define ring_2(r) (((r)->cs & 3) == 2)
+#define ring_3(r) (((r)->cs & 3) == 3)
-#define KERNEL_MODE(_e, _r) ((_e)->arch.flags & TF_kernel_mode)
+#define guest_kernel_mode(v, r) \
+ (ring_3(r) && ((v)->arch.flags & TF_kernel_mode))
-#define PERMIT_SOFTINT(_dpl, _e, _r) \
- ((_dpl) >= (KERNEL_MODE(_e, _r) ? 1 : 3))
+#define permit_softint(dpl, v, r) \
+ ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
/* Number of bytes of on-stack execution state to be context-switched. */
/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|