Hi,
With this patch,
* XEN correctly emulates ld.s for HVM
* the original memory attribute is preserved in vcpu->arch.vtlb
Without this patch, Xen occasionally calls panic_domain() spuriously when running Windows guests.
Thanks,
Kouya
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>
diff -r ac28ee0ee098 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c Wed May 16 11:38:48 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c Wed May 23 10:17:24 2007 +0900
@@ -311,6 +311,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
if(is_physical_mode(v)&&(!(vadr<<1>>62))){
if(vec==2){
+ if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
+ return vmx_handle_lds(regs);
if (v->domain != dom0
&& __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
emulate_io_inst(v,((vadr<<1)>>1),4); // UC
@@ -324,9 +326,16 @@ try_again:
try_again:
if((data=vtlb_lookup(v, vadr,type))!=0){
if (v->domain != dom0 && type == DSIDE_TLB) {
+ if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
+ if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
+ return vmx_handle_lds(regs);
+ }
gppa = (vadr & ((1UL << data->ps) - 1)) +
(data->ppn >> (data->ps - 12) << data->ps);
if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
+ if (misr.sp)
+ panic_domain(NULL, "ld.s on I/O page not with UC attr."
+ " pte=0x%lx\n", data->page_flags);
if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
emulate_io_inst(v, gppa, data->ma);
else {
diff -r ac28ee0ee098 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c Wed May 16 11:38:48 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c Tue May 22 21:42:39 2007 +0900
@@ -507,6 +507,13 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
*pte |= VTLB_PTE_IO;
return -1;
}
+ /* Ensure WB attribute if pte is related to a normal mem page,
+ * which is required by vga acceleration since qemu maps shared
+ * vram buffer with WB.
+ */
+ if (phy_pte.ma != VA_MATTR_NATPAGE)
+ phy_pte.ma = VA_MATTR_WB;
+
// rr.rrval = ia64_get_rr(va);
// ps = rr.ps;
maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
@@ -530,17 +537,8 @@ int thash_purge_and_insert(VCPU *v, u64
vcpu_get_rr(current, ifa, &vrr.rrval);
mrr.rrval = ia64_get_rr(ifa);
if(VMX_DOMAIN(v)){
-
phy_pte = translate_phy_pte(v, &pte, itir, ifa);
- /* Ensure WB attribute if pte is related to a normal mem page,
- * which is required by vga acceleration since qemu maps shared
- * vram buffer with WB.
- */
- if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
- pte &= ~_PAGE_MA_MASK;
- phy_pte &= ~_PAGE_MA_MASK;
- }
if (pte & VTLB_PTE_IO)
ret = 1;
vtlb_purge(v, ifa, ps);
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|