
[Xen-changelog] Transform double mapping to single mapping on vti domain

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Transform double mapping to single mapping on vti domain.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 15 Sep 2005 07:48:26 +0000
Delivery-date: Thu, 15 Sep 2005 07:55:02 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 10b1d30d3f66beac5a8275d108461da558c38d1d
# Parent  0c1f966af47e0c4718b3d2a0376a591dfa2ef7f4
Transform double mapping to single mapping on vti domain.
After this change I think it is possible to merge the ivt.S file.
Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
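
The patch replaces the old double-mapping scheme (separate rr5/rr6/rr7 switching under XEN_DBL_MAPPING) with a single mapping per region, and changes how a guest's virtual region register is turned into a machine one. The standalone C sketch below only illustrates that last step as vmx_vrrtomrr() and vmMangleRID() compute it after this change; the union layout and the sample inputs are simplified assumptions for illustration, not the real asm-ia64 definitions.

/*
 * Standalone sketch (not part of the changeset): the guest RID is offset by
 * the domain's starting_rid, ve is forced on, and bytes 1 and 2 of the
 * resulting RR value are swapped, mirroring vmx_vrrtomrr()/vmMangleRID()
 * after this patch.  Field layout and values are simplified assumptions.
 */
#include <stdio.h>
#include <stdint.h>

typedef union {
    uint64_t rrval;
    struct {
        uint64_t ve  : 1;   /* VHPT walker enable */
        uint64_t rsv : 1;
        uint64_t ps  : 6;   /* preferred page size */
        uint64_t rid : 24;  /* region identifier */
        uint64_t pad : 32;
    } f;
} ia64_rr_t;

/* vmMangleRID() after this patch swaps bytes 1 and 2 of the RR value. */
static uint64_t vm_mangle_rid(uint64_t rrval)
{
    union { uint64_t u; unsigned char b[8]; } t;
    unsigned char tmp;

    t.u = rrval;
    tmp = t.b[1];
    t.b[1] = t.b[2];
    t.b[2] = tmp;
    return t.u;
}

/* New vmx_vrrtomrr(): machine RR = mangle(virtual RR with rid offset, ve=1). */
static uint64_t vrr_to_mrr(uint64_t starting_rid, uint64_t val)
{
    ia64_rr_t rr;

    rr.rrval = val;
    rr.f.rid = rr.f.rid + starting_rid; /* keep guest RIDs in the domain's RID block */
    rr.f.ve  = 1;
    return vm_mangle_rid(rr.rrval);
}

int main(void)
{
    /* Hypothetical inputs: guest writes 0x738 to a region register, RID base 0x100. */
    printf("machine rr value = 0x%llx\n",
           (unsigned long long)vrr_to_mrr(0x100, 0x738));
    return 0;
}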

diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c     Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/linux-xen/efi.c     Thu Sep  8 15:18:40 2005
@@ -523,11 +523,21 @@
        return NULL;
 }
 
+
+#ifdef XEN
+void *pal_vaddr;
+#endif
+
 void
 efi_map_pal_code (void)
 {
+#ifdef XEN
+       u64 psr;
+       pal_vaddr = efi_get_pal_addr ();
+#else
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;
+#endif
 
        if (!pal_vaddr)
                return;
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/linux-xen/unaligned.c
--- a/xen/arch/ia64/linux-xen/unaligned.c       Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/linux-xen/unaligned.c       Thu Sep  8 15:18:40 2005
@@ -296,7 +296,7 @@
 }
 
 #if defined(XEN) && defined(CONFIG_VTI)
-static void
+void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
 {
        struct switch_stack *sw = (struct switch_stack *) regs - 1;
@@ -359,6 +359,57 @@
     }
     ia64_set_rsc(old_rsc);
 }
+
+
+static void
+get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
+{
+    struct switch_stack *sw = (struct switch_stack *) regs - 1;
+    unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
+    unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
+    unsigned long rnats, nat_mask;
+    unsigned long on_kbs;
+    unsigned long old_rsc, new_rsc;
+    long sof = (regs->cr_ifs) & 0x7f;
+    long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
+    long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+    long ridx = r1 - 32;
+
+    if (ridx >= sof) {
+        /* read of out-of-frame register returns an undefined value; 0 in our case.  */
+        DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", 
r1, sof);
+        panic("wrong stack register number");
+    }
+
+    if (ridx < sor)
+        ridx = rotate_reg(sor, rrb_gr, ridx);
+
+    old_rsc=ia64_get_rsc();
+    new_rsc=old_rsc&(~(0x3));
+    ia64_set_rsc(new_rsc);
+
+    bspstore = ia64_get_bspstore();
+    bsp =kbs + (regs->loadrs >> 19); //16+3;
+
+    addr = ia64_rse_skip_regs(bsp, -sof + ridx);
+    nat_mask = 1UL << ia64_rse_slot_num(addr);
+    rnat_addr = ia64_rse_rnat_addr(addr);
+
+    if(addr >= bspstore){
+
+        ia64_flushrs ();
+        ia64_mf ();
+        bspstore = ia64_get_bspstore();
+    }
+    *val=*addr;
+    if(bspstore < rnat_addr){
+        *nat=!!(ia64_get_rnat()&nat_mask);
+    }else{
+        *nat = !!((*rnat_addr)&nat_mask);
+    }
+    ia64_set_rsc(old_rsc);
+}
+
 #else // CONFIG_VTI
 static void
 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/mm.c
--- a/xen/arch/ia64/vmx/mm.c    Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/mm.c    Thu Sep  8 15:18:40 2005
@@ -125,7 +125,7 @@
             entry.cl = DSIDE_TLB;
             rr = vmx_vcpu_rr(vcpu, req.ptr);
             entry.ps = rr.ps;
-            entry.key = redistribute_rid(rr.rid);
+            entry.key = rr.rid;
             entry.rid = rr.rid;
             entry.vadr = PAGEALIGN(req.ptr,entry.ps);
             sections.tr = 1;
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vlsapic.c
--- a/xen/arch/ia64/vmx/vlsapic.c       Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vlsapic.c       Thu Sep  8 15:18:40 2005
@@ -181,7 +181,7 @@
  */
 /* Interrupt must be disabled at this point */
 
-extern u64 tick_to_ns(u64 tick);
+extern u64 cycle_to_ns(u64 cycle);
 #define TIMER_SLOP (50*1000) /* ns */  /* copy from ac_timer.c */
 void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
 {
@@ -212,7 +212,7 @@
     }
     /* Both last_itc & cur_itc < itm, wait for fire condition */
     else {
-        expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
+        expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
         set_ac_timer(&vtm->vtm_timer, expires);
     }
     local_irq_restore(spsr);
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmmu.c  Thu Sep  8 15:18:40 2005
@@ -91,6 +91,10 @@
 
 void recycle_message(thash_cb_t *hcb, u64 para)
 {
+    if(hcb->ht == THASH_VHPT)
+    {
+        printk("ERROR : vhpt recycle happenning!!!\n");
+    }
     printk("hcb=%p recycled with %lx\n",hcb,para);
 }
 
@@ -237,8 +241,12 @@
  */
 void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
 {
-    u64     saved_itir, saved_ifa, saved_rr;
+#if 0
+    u64     saved_itir, saved_ifa;
+#endif
+    u64      saved_rr;
     u64     pages;
+    u64     psr;
     thash_data_t    mtlb;
     ia64_rr vrr;
     unsigned int    cl = tlb->cl;
@@ -253,12 +261,12 @@
     if (mtlb.ppn == INVALID_MFN)
     panic("Machine tlb insert with invalid mfn number.\n");
 
-    __asm __volatile("rsm   psr.ic|psr.i;; srlz.i" );
-    
+    psr = ia64_clear_ic();
+#if 0
     saved_itir = ia64_getreg(_IA64_REG_CR_ITIR);
     saved_ifa = ia64_getreg(_IA64_REG_CR_IFA);
+#endif
     saved_rr = ia64_get_rr(mtlb.ifa);
-
     ia64_setreg(_IA64_REG_CR_ITIR, mtlb.itir);
     ia64_setreg(_IA64_REG_CR_IFA, mtlb.ifa);
     /* Only access memory stack which is mapped by TR,
@@ -268,18 +276,22 @@
     ia64_srlz_d();
     if ( cl == ISIDE_TLB ) {
         ia64_itci(mtlb.page_flags);
-    ia64_srlz_i();
+        ia64_srlz_i();
     }
     else {
         ia64_itcd(mtlb.page_flags);
-    ia64_srlz_d();
+        ia64_srlz_d();
     }
     ia64_set_rr(mtlb.ifa,saved_rr);
     ia64_srlz_d();
+#if 0
     ia64_setreg(_IA64_REG_CR_IFA, saved_ifa);
     ia64_setreg(_IA64_REG_CR_ITIR, saved_itir);
-    __asm __volatile("ssm   psr.ic|psr.i;; srlz.i" );
-}
+#endif
+    ia64_set_psr(psr);
+    ia64_srlz_i();
+}
+
 
 u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
 {
@@ -289,7 +301,6 @@
     struct vcpu *v = current;
     ia64_rr vrr;
 
-    
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
     vrr.rrval = saved_rr0;
@@ -308,7 +319,7 @@
 
     ia64_set_rr(0, saved_rr0);
     ia64_srlz_d();
-    local_irq_restore(psr);
+    ia64_set_psr(psr);
     return hash_addr;
 }
 
@@ -320,7 +331,7 @@
     struct vcpu *v = current;
     ia64_rr vrr;
 
-    // TODO: Set to enforce lazy mode    
+    // TODO: Set to enforce lazy mode
     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     saved_rr0 = ia64_get_rr(0);
     vrr.rrval = saved_rr0;
@@ -341,7 +352,6 @@
     local_irq_restore(psr);
     return tag;
 }
-
 /*
  *  Purge machine tlb.
  *  INPUT
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Thu Sep  8 15:18:40 2005
@@ -33,7 +33,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-
+#include <asm/vhpt.h>
 #include "vmx_minstate.h"
 
 /*
@@ -401,8 +401,9 @@
     mov b0=r16
     br.cond.sptk b0         // call the service
     ;;
+switch_rr7:
+#ifdef XEN_DBL_MAPPING
 // switch rr7 and rr5
-switch_rr7:
     adds r24=SWITCH_MRR5_OFFSET, r21
     adds r26=SWITCH_MRR6_OFFSET, r21
     adds r16=SWITCH_MRR7_OFFSET ,r21
@@ -428,6 +429,7 @@
     ;;
     srlz.i
     ;;
+#endif
 // fall through
 GLOBAL_ENTRY(ia64_vmm_entry)
 /*
@@ -470,6 +472,7 @@
        ;;
 END(vmx_dorfirfi)
 
+#ifdef XEN_DBL_MAPPING  /* will be removed */
 
 #define VMX_PURGE_RR7  0
 #define VMX_INSERT_RR7 1
@@ -609,3 +612,180 @@
     br.sptk rp
 END(vmx_switch_rr7)
     .align PAGE_SIZE
+
+#else
+/*
+ * in0: new rr7
+ * in1: virtual address of shared_info
+ * in2: virtual address of shared_arch_info (VPD)
+ * in3: virtual address of guest_vhpt
+ * in4: virtual address of pal code segment
+ * r8: will contain old rid value
+ */
+
+
+#define PSR_BITS_TO_CLEAR                      \
+   (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB |IA64_PSR_RT |     \
+    IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |    \
+    IA64_PSR_DFL | IA64_PSR_DFH)
+#define PSR_BITS_TO_SET    IA64_PSR_BN
+
+//extern void vmx_switch_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, void *guest_vhpt, void * pal_vaddr );
+
+GLOBAL_ENTRY(vmx_switch_rr7)
+   // not sure this unwind statement is correct...
+   .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
+   alloc loc1 = ar.pfs, 5, 9, 0, 0
+1: {
+     mov r28  = in0        // copy procedure index
+     mov r8   = ip         // save ip to compute branch
+     mov loc0 = rp         // save rp
+    };;
+    .body
+    movl loc2=PERCPU_ADDR
+    ;;
+    tpa loc2 = loc2         // get physical address of per-cpu data
+    ;;
+    dep loc3 = 0,in1,60,4          // get physical address of shared_info
+    dep loc4 = 0,in2,60,4          // get physical address of shared_arch_info
+    dep loc5 = 0,in3,60,4          // get physical address of guest_vhpt
+    dep loc6 = 0,in4,60,4          // get physical address of pal code
+    ;;
+    mov loc7 = psr          // save psr
+    ;;
+    mov loc8 = ar.rsc           // save RSE configuration
+    ;;
+    mov ar.rsc = 0          // put RSE in enforced lazy, LE mode
+    movl r16=PSR_BITS_TO_CLEAR
+    movl r17=PSR_BITS_TO_SET
+    ;;
+    or loc7 = loc7,r17      // add in psr the bits to set
+    ;;
+    andcm r16=loc7,r16      // removes bits to clear from psr
+    br.call.sptk.many rp=ia64_switch_mode_phys
+1:
+   // now in physical mode with psr.i/ic off so do rr7 switch
+    dep r16=-1,r0,61,3
+    ;;
+    mov rr[r16]=in0
+    srlz.d
+    ;;
+    rsm 0x6000
+    ;;
+    srlz.d
+
+    // re-pin mappings for kernel text and data
+    mov r18=KERNEL_TR_PAGE_SHIFT<<2
+    movl r17=KERNEL_START
+    ;;
+    ptr.i   r17,r18
+    ptr.d   r17,r18
+    ;;
+    mov cr.itir=r18
+    mov cr.ifa=r17
+    mov r16=IA64_TR_KERNEL
+    //mov r3=ip
+    movl r25 = PAGE_KERNEL
+    ;;
+    dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+    ;;
+    or r18=r2,r25
+    ;;
+   srlz.i
+   ;;
+   itr.i itr[r16]=r18
+   ;;
+   itr.d dtr[r16]=r18
+   ;;
+
+   // re-pin mappings for per-cpu data
+
+   movl r22 = PERCPU_ADDR
+   ;;
+   mov r24=IA64_TR_PERCPU_DATA
+   or loc2 = r25,loc2          // construct PA | page properties
+   mov r23=PERCPU_PAGE_SHIFT<<2
+   ;;
+   ptr.d   r22,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=r22
+   ;;
+   itr.d dtr[r24]=loc2     // wire in new mapping...
+   ;;
+
+
+#if    0
+   // re-pin mappings for shared_info
+
+   mov r24=IA64_TR_SHARED_INFO
+   movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+   ;;
+   or loc3 = r25,loc3          // construct PA | page properties
+   mov r23 = PAGE_SHIFT<<2
+   ;;
+   ptr.d   in1,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in1
+   ;;
+   itr.d dtr[r24]=loc3     // wire in new mapping...
+   ;;
+   // re-pin mappings for shared_arch_info
+
+   mov r24=IA64_TR_ARCH_INFO
+   or loc4 = r25,loc4          // construct PA | page properties
+   mov r23 = PAGE_SHIFT<<2
+   ;;
+   ptr.d   in2,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in2
+   ;;
+   itr.d dtr[r24]=loc4     // wire in new mapping...
+   ;;
+#endif
+
+
+   // re-pin mappings for guest_vhpt
+
+   mov r24=IA64_TR_VHPT
+   movl r25=PAGE_KERNEL
+   ;;
+   or loc5 = r25,loc5          // construct PA | page properties
+   mov r23 = VCPU_TLB_SHIFT<<2
+   ;;
+   ptr.d   in3,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in3
+   ;;
+   itr.d dtr[r24]=loc5     // wire in new mapping...
+   ;;
+
+   // re-pin mappings for PAL code section
+
+   mov r24=IA64_TR_PALCODE
+   or loc6 = r25,loc6          // construct PA | page properties
+   mov r23 = IA64_GRANULE_SHIFT<<2
+   ;;
+   ptr.i   in4,r23
+   ;;
+   mov cr.itir=r23
+   mov cr.ifa=in4
+   ;;
+   itr.i itr[r24]=loc6     // wire in new mapping...
+   ;;
+
+   // done, switch back to virtual and return
+   mov r16=loc7            // r16= original psr
+   br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+   mov ar.pfs = loc1
+   mov rp = loc0
+   ;;
+   mov ar.rsc=loc8         // restore RSE configuration
+   srlz.d              // serialize restoration of psr.l
+   br.ret.sptk.many rp
+END(vmx_switch_rr7)
+#endif
+
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_irq_ia64.c
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c  Thu Sep  8 15:18:40 2005
@@ -23,6 +23,12 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
+#else
+#   define IS_RESCHEDULE(vec)   (0)
+#endif
 
 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Thu Sep  8 15:18:40 2005
@@ -118,10 +118,12 @@
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6) br.sptk vmx_fault_1
+(p6) br.sptk vmx_alt_itlb_miss_1
+//(p6) br.sptk vmx_fault_1
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
+    ;;
     ttag r20 = r16
     ;;
 vmx_itlb_loop:
@@ -180,10 +182,12 @@
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_2
+    (p6)br.sptk vmx_alt_dtlb_miss_1
+//(p6)br.sptk vmx_fault_2
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
+    ;;
     ttag r20 = r16
     ;;
 vmx_dtlb_loop:
@@ -243,6 +247,7 @@
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.sptk vmx_fault_3
+vmx_alt_itlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r24=cr.ipsr
@@ -272,6 +277,7 @@
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
 (p7)br.sptk vmx_fault_4
+vmx_alt_dtlb_miss_1:
        mov r16=cr.ifa          // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        mov r20=cr.isr
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_minstate.h
--- a/xen/arch/ia64/vmx/vmx_minstate.h  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_minstate.h  Thu Sep  8 15:18:40 2005
@@ -128,21 +128,31 @@
  * Note that psr.ic is NOT turned on by this macro.  This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
+#ifdef XEN_DBL_MAPPING
+#define SAVE_MIN_CHANGE_RR  \
 /*  switch rr7 */       \
    movl r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
     movl r17=(7<<61);        \
    movl r20=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)); \
     movl r22=(6<<61);        \
-    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);                \
-    movl r23=(5<<61);  \
+    movl r18=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1);     \
+    movl r23=(5<<61);   \
     ;;              \
     mov rr[r17]=r16;             \
-    mov rr[r22]=r20;            \
-    mov rr[r23]=r18;            \
+    mov rr[r22]=r20;         \
+    mov rr[r23]=r18;         \
     ;;      \
     srlz.i;      \
-    ;;  \
+    ;;
+
+#else
+
+#define SAVE_MIN_CHANGE_RR
+
+#endif
+
+#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
+    SAVE_MIN_CHANGE_RR;      \
     VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
     mov r27=ar.rsc;         /* M */                         \
     mov r20=r1;         /* A */                         \
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c  Thu Sep  8 15:18:40 2005
@@ -27,7 +27,6 @@
 #include <asm/vmx_phy_mode.h>
 #include <xen/sched.h>
 #include <asm/pgtable.h>
-
 
 int valid_mm_mode[8] = {
     GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
@@ -215,13 +214,13 @@
 vmx_init_all_rr(VCPU *vcpu)
 {
        VMX(vcpu,vrr[VRN0]) = 0x38;
-       VMX(vcpu,vrr[VRN1]) = 0x38;
-       VMX(vcpu,vrr[VRN2]) = 0x38;
-       VMX(vcpu,vrr[VRN3]) = 0x38;
-       VMX(vcpu,vrr[VRN4]) = 0x38;
-       VMX(vcpu,vrr[VRN5]) = 0x38;
-       VMX(vcpu,vrr[VRN6]) = 0x60;
-       VMX(vcpu,vrr[VRN7]) = 0x60;
+       VMX(vcpu,vrr[VRN1]) = 0x138;
+       VMX(vcpu,vrr[VRN2]) = 0x238;
+       VMX(vcpu,vrr[VRN3]) = 0x338;
+       VMX(vcpu,vrr[VRN4]) = 0x438;
+       VMX(vcpu,vrr[VRN5]) = 0x538;
+       VMX(vcpu,vrr[VRN6]) = 0x660;
+       VMX(vcpu,vrr[VRN7]) = 0x760;
 
        VMX(vcpu,mrr5) = vmx_vrrtomrr(vcpu, 0x38);
        VMX(vcpu,mrr6) = vmx_vrrtomrr(vcpu, 0x60);
@@ -234,10 +233,8 @@
        unsigned long psr;
        ia64_rr phy_rr;
 
-       psr = ia64_clear_ic();
-
-       phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
-       phy_rr.ve = 1;
+       local_irq_save(psr);
+
 
        /* WARNING: not allow co-exist of both virtual mode and physical
         * mode in same region
@@ -245,9 +242,15 @@
        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
-               phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
+       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+       phy_rr.ve = 1;
+
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
-               phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
+       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+           phy_rr.ve = 1;
+
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
@@ -265,6 +268,18 @@
        ia64_set_rr((VRN3 << VRN_SHIFT),
                     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
 #endif
+#ifndef XEN_DBL_MAPPING
+    extern void * pal_vaddr;
+    ia64_set_rr((VRN5 << VRN_SHIFT),
+            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+    ia64_set_rr((VRN6 << VRN_SHIFT),
+            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
+                (void *)vcpu->vcpu_info->arch.privregs,
+                ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+    ia64_set_pta(vcpu->arch.arch_vmx.mpta);
+#endif
+
        ia64_srlz_d();
        ia64_set_psr(psr);
     ia64_srlz_i();
@@ -276,15 +291,17 @@
     UINT64 psr;
     ia64_rr phy_rr;
 
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
-    phy_rr.ve = 1;
 
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
-    phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
+    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
+    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+    phy_rr.ve = 1;
     ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
-    phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
+    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
+    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+    phy_rr.ve = 1;
     ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
 
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_process.c   Thu Sep  8 15:18:40 2005
@@ -41,7 +41,7 @@
 #include <asm/regionreg.h>
 #include <asm/privop.h>
 #include <asm/ia64_int.h>
-#include <asm/hpsim_ssc.h>
+//#include <asm/hpsim_ssc.h>
 #include <asm/dom_fw.h>
 #include <asm/vmx_vcpu.h>
 #include <asm/kregs.h>
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Thu Sep  8 15:18:40 2005
@@ -215,6 +215,7 @@
 {
     ia64_rr oldrr,newrr;
     thash_cb_t *hcb;
+    extern void * pal_vaddr;
     oldrr=vmx_vcpu_rr(vcpu,reg);
     newrr.rrval=val;
 #if 1
@@ -224,7 +225,9 @@
     }
 #endif
     VMX(vcpu,vrr[reg>>61]) = val;
+
     switch((u64)(reg>>61)) {
+#ifdef XEN_DBL_MAPPING
     case VRN5:
         VMX(vcpu,mrr5)=vmx_vrrtomrr(vcpu,val);
         break;
@@ -234,12 +237,17 @@
     case VRN7:
         VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
         /* Change double mapping for this domain */
-#ifdef XEN_DBL_MAPPING
         vmx_change_double_mapping(vcpu,
                       vmx_vrrtomrr(vcpu,oldrr.rrval),
                       vmx_vrrtomrr(vcpu,newrr.rrval));
+        break;
+#else
+    case VRN7:
+       vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
+        (void *)vcpu->vcpu_info->arch.privregs,
+       ( void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
+       break;
 #endif
-        break;
     default:
         ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
         break;
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/vmx/vtlb.c  Thu Sep  8 15:18:40 2005
@@ -343,7 +343,7 @@
                 hcb->recycle_notifier(hcb,(u64)entry);
         }
         thash_purge_all(hcb);
-        cch = cch_alloc(hcb);
+//        cch = cch_alloc(hcb);
     }
     return cch;
 }
@@ -364,7 +364,7 @@
     ia64_rr vrr;
     u64 gppn;
     u64 ppns, ppne;
-    
+
     hash_table = (hcb->hash_func)(hcb->pta,
                         va, entry->rid, entry->ps);
     if( INVALID_ENTRY(hcb, hash_table) ) {
@@ -374,10 +374,14 @@
     else {
         // TODO: Add collision chain length limitation.
         cch = __alloc_chain(hcb,entry);
-        
-        *cch = *hash_table;
-        *hash_table = *entry;
-        hash_table->next = cch;
+        if(cch == NULL){
+            *hash_table = *entry;
+            hash_table->next = 0;
+        }else{
+            *cch = *hash_table;
+            *hash_table = *entry;
+            hash_table->next = cch;
+        }
     }
     if(hcb->vcpu->domain->domain_id==0){
        thash_insert(hcb->ts->vhpt, entry, va);
@@ -396,26 +400,29 @@
 
 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
-    thash_data_t    *hash_table, *cch;
+    thash_data_t   vhpt_entry, *hash_table, *cch;
     ia64_rr vrr;
-    
+    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
+        panic("Can't convert to machine VHPT entry\n");
+    }
     hash_table = (hcb->hash_func)(hcb->pta,
                         va, entry->rid, entry->ps);
     if( INVALID_ENTRY(hcb, hash_table) ) {
-        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
-            panic("Can't convert to machine VHPT entry\n");
-        }
+        *hash_table = vhpt_entry;
         hash_table->next = 0;
     }
     else {
         // TODO: Add collision chain length limitation.
         cch = __alloc_chain(hcb,entry);
-        
-        *cch = *hash_table;
-        if ( !__tlb_to_vhpt(hcb, entry, va, hash_table) ) {
-            panic("Can't convert to machine VHPT entry\n");
-        }
-        hash_table->next = cch;
+        if(cch == NULL){
+            *hash_table = vhpt_entry;
+            hash_table->next = 0;
+        }else{
+            *cch = *hash_table;
+            *hash_table = vhpt_entry;
+            hash_table->next = cch;
+        }
+
         if(hash_table->tag==hash_table->next->tag)
             while(1);
     }
@@ -488,10 +495,10 @@
 {
     thash_data_t *next;
 
-    if ( ++cch_depth > MAX_CCH_LENGTH ) {
-        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
-        while(1);
-   }
+//    if ( ++cch_depth > MAX_CCH_LENGTH ) {
+//        printf ("cch length > MAX_CCH_LENGTH, exceed the expected length\n");
+//        while(1);
+//   }
     if ( cch -> next ) {
         next = thash_rem_cch(hcb, cch->next);
     }
@@ -914,7 +921,7 @@
         INVALIDATE_HASH(hcb,hash_table);
     }
 }
-
+#define VTLB_DEBUG
 #ifdef  VTLB_DEBUG
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/xen/hyperprivop.S   Thu Sep  8 15:18:40 2005
@@ -26,6 +26,24 @@
 #define FAST_PTC_GA
 #undef RFI_TO_INTERRUPT // not working yet
 #endif
+
+#define    XEN_HYPER_RFI           0x1
+#define    XEN_HYPER_RSM_DT        0x2
+#define    XEN_HYPER_SSM_DT        0x3
+#define    XEN_HYPER_COVER         0x4
+#define    XEN_HYPER_ITC_D         0x5
+#define    XEN_HYPER_ITC_I         0x6
+#define    XEN_HYPER_SSM_I         0x7
+#define    XEN_HYPER_GET_IVR       0x8
+#define    XEN_HYPER_GET_TPR       0x9
+#define    XEN_HYPER_SET_TPR       0xa
+#define    XEN_HYPER_EOI           0xb
+#define    XEN_HYPER_SET_ITM       0xc
+#define    XEN_HYPER_THASH         0xd
+#define    XEN_HYPER_PTC_GA        0xe
+#define    XEN_HYPER_ITR_D         0xf
+#define    XEN_HYPER_GET_RR        0x10
+#define    XEN_HYPER_SET_RR        0x11
 
 #ifdef CONFIG_SMP
 #warning "FIXME: ptc.ga instruction requires spinlock for SMP"
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c     Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/xen/regionreg.c     Thu Sep  8 15:18:40 2005
@@ -51,7 +51,7 @@
 // use this to allocate a rid out of the "Xen reserved rid block"
 unsigned long allocate_reserved_rid(void)
 {
-       static unsigned long currentrid = XEN_DEFAULT_RID;
+       static unsigned long currentrid = XEN_DEFAULT_RID+1;
        unsigned long t = currentrid;
 
        unsigned long max = RIDS_PER_RIDBLOCK;
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Thu Sep  8 13:24:08 2005
+++ b/xen/arch/ia64/xen/vcpu.c  Thu Sep  8 15:18:40 2005
@@ -1037,7 +1037,7 @@
 #endif
 
        if (is_idle_task(vcpu->domain)) {
-               printf("****** vcpu_set_next_timer called during idle!!\n");
+//             printf("****** vcpu_set_next_timer called during idle!!\n");
                vcpu_safe_set_itm(s);
                return;
        }
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Thu Sep  8 13:24:08 2005
+++ b/xen/include/asm-ia64/mm.h Thu Sep  8 15:18:40 2005
@@ -163,8 +163,8 @@
            unlikely((nx & PGC_count_mask) == 0) ||     /* Count overflow? */
            unlikely((x >> 32) != _domain)) {           /* Wrong owner? */
            DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
-               page_to_pfn(page), domain, unpickle_domptr(d),
-               x, page->u.inuse.typeinfo);
+               page_to_pfn(page), domain, unpickle_domptr(domain),
+               x, page->u.inuse.type_info);
            return 0;
        }
     }
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/asm-ia64/regionreg.h
--- a/xen/include/asm-ia64/regionreg.h  Thu Sep  8 13:24:08 2005
+++ b/xen/include/asm-ia64/regionreg.h  Thu Sep  8 15:18:40 2005
@@ -55,8 +55,8 @@
 
        t.uint = RIDVal;
        tmp = t.bytes[1];
-       t.bytes[1] = t.bytes[3];
-       t.bytes[3] = tmp;
+       t.bytes[1] = t.bytes[2];
+       t.bytes[2] = tmp;
 
        return t.uint;
 }
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/asm-ia64/vmmu.h
--- a/xen/include/asm-ia64/vmmu.h       Thu Sep  8 13:24:08 2005
+++ b/xen/include/asm-ia64/vmmu.h       Thu Sep  8 15:18:40 2005
@@ -225,8 +225,8 @@
            INVALID_ENTRY(hcb, hash) = 1;        \
            hash->next = NULL; }
 
-#define PURGABLE_ENTRY(hcb,en)  \
-               ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
+#define PURGABLE_ENTRY(hcb,en)  1
+//             ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
 
 
 /*
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Thu Sep  8 13:24:08 2005
+++ b/xen/include/asm-ia64/vmx.h        Thu Sep  8 15:18:40 2005
@@ -29,7 +29,6 @@
 extern unsigned int vmx_enabled;
 extern void vmx_init_env(void);
 extern void vmx_final_setup_domain(struct domain *d);
-extern void vmx_init_double_mapping_stub(void);
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
@@ -37,6 +36,7 @@
 extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
 extern void vmx_purge_double_mapping(u64, u64, u64);
 extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
+extern void vmx_init_double_mapping_stub(void);
 #endif
 
 extern void vmx_wait_io(void);
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Thu Sep  8 13:24:08 2005
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu Sep  8 15:18:40 2005
@@ -593,9 +593,10 @@
     VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
     return (IA64_NO_FAULT);
 }
-
+#if 0
 /* Another hash performance algorithm */
 #define redistribute_rid(rid)  (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
+#endif
 static inline unsigned long
 vmx_vrrtomrr(VCPU *v, unsigned long val)
 {
@@ -603,14 +604,14 @@
     u64          rid;
 
     rr.rrval=val;
-    rr.rid = vmMangleRID(v->arch.starting_rid  + rr.rid);
+    rr.rid = rr.rid + v->arch.starting_rid;
+    rr.ve = 1;
+    return  vmMangleRID(rr.rrval);
 /* Disable this rid allocation algorithm for now */
 #if 0
     rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
     rr.rid = redistribute_rid(rid);
 #endif 
 
-    rr.ve=1;
-    return rr.rrval;
-}
-#endif
+}
+#endif
diff -r 0c1f966af47e -r 10b1d30d3f66 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Thu Sep  8 13:24:08 2005
+++ b/xen/include/public/arch-ia64.h    Thu Sep  8 15:18:40 2005
@@ -280,22 +280,4 @@
 
 #endif /* !__ASSEMBLY__ */
 
-#define        XEN_HYPER_RFI                   0x1
-#define        XEN_HYPER_RSM_DT                0x2
-#define        XEN_HYPER_SSM_DT                0x3
-#define        XEN_HYPER_COVER                 0x4
-#define        XEN_HYPER_ITC_D                 0x5
-#define        XEN_HYPER_ITC_I                 0x6
-#define        XEN_HYPER_SSM_I                 0x7
-#define        XEN_HYPER_GET_IVR               0x8
-#define        XEN_HYPER_GET_TPR               0x9
-#define        XEN_HYPER_SET_TPR               0xa
-#define        XEN_HYPER_EOI                   0xb
-#define        XEN_HYPER_SET_ITM               0xc
-#define        XEN_HYPER_THASH                 0xd
-#define        XEN_HYPER_PTC_GA                0xe
-#define        XEN_HYPER_ITR_D                 0xf
-#define        XEN_HYPER_GET_RR                0x10
-#define        XEN_HYPER_SET_RR                0x11
-
 #endif /* __HYPERVISOR_IF_IA64_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] Transform double mapping to single mapping on vti domain., Xen patchbot -unstable <=