# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 96d606c074f8801180d3f158e33f9b89a76149f0
# Parent eb2a2529f96cbed28aae36ccad16dbd498060121
The current ptrace code traverses the page table structures to
get the guest physical address, even when guest paging is disabled.
The gdbserver-xen tries to access guest pdes & ptes to map memory of the
hvm guest being debugged, and it gets a seg-fault because the guest has not
set up its paging yet. The attached patch adds a guest paging state check,
so that map_domain_va() can get the correct guest physical address
from the guest va.
Signed-off-by: Nitin A Kamble <nitin.a.kamble@xxxxxxxxx>
diff -r eb2a2529f96c -r 96d606c074f8 tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c Tue Mar 21 10:28:03 2006
+++ b/tools/libxc/xc_ptrace.c Tue Mar 21 10:29:17 2006
@@ -251,35 +251,39 @@
if (fetch_regs(xc_handle, cpu, NULL))
return NULL;
- if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
- {
- cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
- if ( cr3_virt[cpu] )
- munmap(cr3_virt[cpu], PAGE_SIZE);
- cr3_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- cr3_phys[cpu] >> PAGE_SHIFT);
- if ( cr3_virt[cpu] == NULL )
+ if (paging_enabled(&ctxt[cpu])) {
+ if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
+ {
+ cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
+ if ( cr3_virt[cpu] )
+ munmap(cr3_virt[cpu], PAGE_SIZE);
+ cr3_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ cr3_phys[cpu] >> PAGE_SHIFT);
+ if ( cr3_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
return NULL;
- }
- if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
- return NULL;
- if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
- pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
- if ( pde != pde_phys[cpu] )
- {
- pde_phys[cpu] = pde;
- if ( pde_virt[cpu] )
- munmap(pde_virt[cpu], PAGE_SIZE);
- pde_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- pde_phys[cpu] >> PAGE_SHIFT);
- if ( pde_virt[cpu] == NULL )
+ if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+ pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+ if ( pde != pde_phys[cpu] )
+ {
+ pde_phys[cpu] = pde;
+ if ( pde_virt[cpu] )
+ munmap(pde_virt[cpu], PAGE_SIZE);
+ pde_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ pde_phys[cpu] >> PAGE_SHIFT);
+ if ( pde_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
return NULL;
- }
- if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
- return NULL;
- if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+ } else {
+ page = va;
+ }
+ if (ctxt[cpu].flags & VGCF_HVM_GUEST)
page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
{
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|