# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1179222403 -3600
# Node ID cb006eecd6f5b8fbc3baa128e68163799eccb31c
# Parent dc4324d3fbb0ca99734e289c95a5b73244ef7bf2
x86/hvm: HVM_DBG_LOG() cleanup
- make the log levels used consistent in a few places
- remove trailing newlines, dots, and commas
- remove explicitly specified function names from message text
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
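A minimal before/after sketch of the pattern applied throughout (not part of the patch; the call site and the "value" argument are hypothetical, and it is assumed here that the HVM_DBG_LOG() macro itself already prefixes the calling function's name and appends a newline, which is why both are dropped from the individual format strings):

    /* before: function name, trailing dot and newline embedded in the message */
    HVM_DBG_LOG(DBG_LEVEL_1, "my_function(): value = %lx.\n", value);

    /* after: the macro is left to supply the function name and the newline */
    HVM_DBG_LOG(DBG_LEVEL_1, "value = %lx", value);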
xen/arch/x86/hvm/svm/emulate.c |    5 ++---
xen/arch/x86/hvm/svm/svm.c     |   16 ++++++++--------
xen/arch/x86/hvm/vioapic.c     |   23 ++++++++++-------------
xen/arch/x86/hvm/vlapic.c      |   24 ++++++++++++------------
xen/arch/x86/hvm/vmx/vmx.c     |   29 ++++++++++++++---------------
5 files changed, 46 insertions(+), 51 deletions(-)
diff -r dc4324d3fbb0 -r cb006eecd6f5 xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c Tue May 15 10:46:03 2007 +0100
+++ b/xen/arch/x86/hvm/svm/emulate.c Tue May 15 10:46:43 2007 +0100
@@ -145,9 +145,8 @@ unsigned long get_effective_addr_modrm64
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- HVM_DBG_LOG(DBG_LEVEL_1, "get_effective_addr_modrm64(): prefix = %x, "
- "length = %d, operand[0,1] = %x %x.\n", prefix, *size, operand [0],
- operand [1]);
+ HVM_DBG_LOG(DBG_LEVEL_1, "prefix = %x, length = %d, operand[0,1] = %x %x",
+ prefix, *size, operand[0], operand[1]);
if ((NULL == size) || (NULL == operand) || (1 > *size))
{
diff -r dc4324d3fbb0 -r cb006eecd6f5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Tue May 15 10:46:03 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Tue May 15 10:46:43 2007 +0100
@@ -128,7 +128,7 @@ static inline int long_mode_do_msr_write
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
ecx, msr_content);
switch ( ecx )
@@ -387,7 +387,7 @@ int svm_vmcb_restore(struct vcpu *v, str
* If different, make a shadow. Check if the PDBR is valid
* first.
*/
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
@@ -1590,7 +1590,7 @@ static int svm_set_cr0(unsigned long val
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long old_base_mfn;
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
/* ET is reserved and should be always be 1. */
value |= X86_CR0_ET;
@@ -1615,11 +1615,11 @@ static int svm_set_cr0(unsigned long val
{
if ( !svm_cr4_pae_is_set(v) )
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}
@@ -1712,7 +1712,7 @@ static void mov_from_cr(int cr, int gp,
set_reg(gp, value, regs, vmcb);
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
}
@@ -1730,8 +1730,8 @@ static int mov_to_cr(int gpreg, int cr,
HVMTRACE_2D(CR_WRITE, v, cr, value);
- HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
- HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
+ HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx, current = %p",
+ cr, value, v);
switch ( cr )
{
diff -r dc4324d3fbb0 -r cb006eecd6f5 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Tue May 15 10:46:03 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c Tue May 15 10:46:43 2007 +0100
@@ -99,7 +99,7 @@ static unsigned long vioapic_read(struct
struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
uint32_t result;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_read addr %lx\n", addr);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "addr %lx", addr);
addr &= 0xff;
@@ -183,8 +183,7 @@ static void vioapic_write_indirect(
{
uint32_t redir_index = (vioapic->ioregsel - 0x10) >> 1;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_write_indirect "
- "change redir index %x val %lx\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "change redir index %x val %lx",
redir_index, val);
if ( redir_index >= VIOAPIC_NUM_PINS )
@@ -252,8 +251,7 @@ static void ioapic_inj_irq(
uint8_t trig_mode,
uint8_t delivery_mode)
{
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
- "irq %d trig %d delive mode %d\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
vector, trig_mode, delivery_mode);
switch ( delivery_mode )
@@ -275,8 +273,8 @@ static uint32_t ioapic_get_delivery_bitm
uint32_t mask = 0;
struct vcpu *v;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
- "dest %d dest_mode %d\n", dest, dest_mode);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "dest %d dest_mode %d",
+ dest, dest_mode);
if ( dest_mode == 0 ) /* Physical mode. */
{
@@ -304,7 +302,7 @@ static uint32_t ioapic_get_delivery_bitm
}
out:
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask mask %x\n",
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "mask %x",
mask);
return mask;
}
@@ -331,14 +329,13 @@ static void vioapic_deliver(struct hvm_h
HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
"dest=%x dest_mode=%x delivery_mode=%x "
- "vector=%x trig_mode=%x\n",
+ "vector=%x trig_mode=%x",
dest, dest_mode, delivery_mode, vector, trig_mode);
deliver_bitmask = ioapic_get_delivery_bitmask(vioapic, dest, dest_mode);
if ( !deliver_bitmask )
{
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
- "no target on destination\n");
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "no target on destination");
return;
}
@@ -364,7 +361,7 @@ static void vioapic_deliver(struct hvm_h
else
{
HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
- "mask=%x vector=%x delivery_mode=%x\n",
+ "mask=%x vector=%x delivery_mode=%x",
deliver_bitmask, vector, dest_LowestPrio);
}
break;
@@ -412,7 +409,7 @@ void vioapic_irq_positive_edge(struct do
struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
union vioapic_redir_entry *ent;
- HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_irq_positive_edge irq %x", irq);
+ HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);
ASSERT(irq < VIOAPIC_NUM_PINS);
ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
diff -r dc4324d3fbb0 -r cb006eecd6f5 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Tue May 15 10:46:03 2007 +0100
+++ b/xen/arch/x86/hvm/vlapic.c Tue May 15 10:46:43 2007 +0100
@@ -171,7 +171,7 @@ uint32_t vlapic_get_ppr(struct vlapic *v
ppr = isrv & 0xf0;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
- "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x.",
+ "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
vlapic, ppr, isr, isrv);
return ppr;
@@ -211,7 +211,7 @@ static int vlapic_match_dest(struct vcpu
struct vlapic *target = vcpu_vlapic(v);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
- "dest_mode 0x%x, short_hand 0x%x\n",
+ "dest_mode 0x%x, short_hand 0x%x",
target, source, dest, dest_mode, short_hand);
switch ( short_hand )
@@ -270,14 +270,14 @@ static int vlapic_accept_irq(struct vcpu
if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "level trig mode repeatedly for vector %d\n", vector);
+ "level trig mode repeatedly for vector %d", vector);
break;
}
if ( trig_mode )
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "level trig mode for vector %d\n", vector);
+ "level trig mode for vector %d", vector);
vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
}
@@ -399,7 +399,7 @@ static void vlapic_ipi(struct vlapic *vl
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
- "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x.",
+ "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
icr_high, icr_low, short_hand, dest,
trig_mode, level, dest_mode, delivery_mode, vector);
@@ -437,7 +437,7 @@ static uint32_t vlapic_get_tmcct(struct
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
"timer initial count %d, timer current count %d, "
- "offset %"PRId64".",
+ "offset %"PRId64,
tmict, tmcct, counter_passed);
return tmcct;
@@ -454,7 +454,7 @@ static void vlapic_set_tdcr(struct vlapi
vlapic->hw.timer_divisor = 1 << (val & 7);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
- "vlapic_set_tdcr timer_divisor: %d.",
vlapic->hw.timer_divisor);
+ "timer_divisor: %d", vlapic->hw.timer_divisor);
}
static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
@@ -493,7 +493,7 @@ static unsigned long vlapic_read(struct
/* some bugs on kernel cause read this with byte*/
if ( len != 4 )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "read with len=0x%lx, should be 4 instead.\n",
+ "read with len=0x%lx, should be 4 instead",
len);
alignment = offset & 0x3;
@@ -522,7 +522,7 @@ static unsigned long vlapic_read(struct
}
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
- "and the result is 0x%lx.", offset, len, result);
+ "and the result is 0x%lx", offset, len, result);
return result;
@@ -539,7 +539,7 @@ static void vlapic_write(struct vcpu *v,
if ( offset != 0xb0 )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "offset 0x%x with length 0x%lx, and value is 0x%lx.",
+ "offset 0x%x with length 0x%lx, and value is 0x%lx",
offset, len, val);
/*
@@ -713,7 +713,7 @@ void vlapic_msr_set(struct vlapic *vlapi
vlapic->hw.apic_base_msr = value;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
- "apic base msr is 0x%016"PRIx64".", vlapic->hw.apic_base_msr);
+ "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}
int vlapic_accept_pic_intr(struct vcpu *v)
@@ -913,7 +913,7 @@ int vlapic_init(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
- HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);
+ HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
vlapic->regs_page = alloc_domheap_page(NULL);
if ( vlapic->regs_page == NULL )
diff -r dc4324d3fbb0 -r cb006eecd6f5 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue May 15 10:46:03 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue May 15 10:46:43 2007 +0100
@@ -110,10 +110,11 @@ static inline int long_mode_do_msr_read(
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
u64 msr_content = 0;
+ u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
- switch ( (u32)regs->ecx ) {
+ switch ( ecx ) {
case MSR_EFER:
msr_content = v->arch.hvm_vmx.efer;
break;
@@ -156,7 +157,7 @@ static inline int long_mode_do_msr_read(
return 0;
}
- HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: 0x%"PRIx64, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
regs->eax = (u32)(msr_content >> 0);
regs->edx = (u32)(msr_content >> 32);
@@ -172,8 +173,7 @@ static inline int long_mode_do_msr_write
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
- HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
- ecx, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
switch ( ecx )
{
@@ -261,7 +261,7 @@ static inline int long_mode_do_msr_write
return 1;
uncanonical_address:
- HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write %x\n", ecx);
+ HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
gp_fault:
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
return 0;
@@ -576,7 +576,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
* If different, make a shadow. Check if the PDBR is valid
* first.
*/
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
/* current!=vcpu as not called by arch_vmx_do_launch */
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
@@ -2023,7 +2023,7 @@ static int vmx_set_cr0(unsigned long val
unsigned long old_cr0;
unsigned long old_base_mfn;
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
/* ET is reserved and should be always be 1. */
value |= X86_CR0_ET;
@@ -2072,12 +2072,12 @@ static int vmx_set_cr0(unsigned long val
if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
- "with EFER.LME set but not CR4.PAE\n");
+ "with EFER.LME set but not CR4.PAE");
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
}
else
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
v->arch.hvm_vmx.efer |= EFER_LMA;
vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value |= VM_ENTRY_IA32E_MODE;
@@ -2138,7 +2138,7 @@ static int vmx_set_cr0(unsigned long val
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Transfering control to vmxassist %%eip 0x%lx\n", eip);
+ "Transfering control to vmxassist %%eip 0x%lx", eip);
return 0; /* do not update eip! */
}
}
@@ -2146,12 +2146,12 @@ static int vmx_set_cr0(unsigned long val
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Enabling CR0.PE at %%eip 0x%lx\n", eip);
+ "Enabling CR0.PE at %%eip 0x%lx", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
{
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
- "Restoring to %%eip 0x%lx\n", eip);
+ "Restoring to %%eip 0x%lx", eip);
return 0; /* do not update eip! */
}
}
@@ -2309,7 +2309,7 @@ static int mov_to_cr(int gp, int cr, str
if ( unlikely(vmx_long_mode_enabled(v)) )
{
HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
- "EFER.LMA is set\n");
+ "EFER.LMA is set");
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
}
}
@@ -2439,8 +2439,7 @@ static inline int vmx_do_msr_read(struct
u32 ecx = regs->ecx, eax, edx;
struct vcpu *v = current;
- HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
- ecx, (u32)regs->eax, (u32)regs->edx);
+ HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);
switch (ecx) {
case MSR_IA32_TIME_STAMP_COUNTER: