To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] vDSO paravirtualization: import linux files
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 28 Jul 2006 16:21:17 +0000
Delivery-date: Fri, 28 Jul 2006 09:29:26 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 614deef19299e3352233d5e27017b677830d47f3
# Parent  70ee75d5c12c867ae4920bd4e1ddef84c4f06286
[IA64] vDSO paravirtualization: import linux files

import gate.S, gate.lds.S and patch.c, which are needed to
paravirtualize the vDSO area.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/ia64/kernel/gate.S     |  376 +++++++++++++++++++++++
 linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S |   95 +++++
 linux-2.6-xen-sparse/arch/ia64/kernel/patch.c    |  197 ++++++++++++
 3 files changed, 668 insertions(+)

diff -r 70ee75d5c12c -r 614deef19299 linux-2.6-xen-sparse/arch/ia64/kernel/gate.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S      Mon Jul 24 13:04:40 2006 -0600
@@ -0,0 +1,376 @@
+/*
+ * This file contains the code that gets mapped at the upper end of each task's text
+ * region.  For now, it contains the signal trampoline code only.
+ *
+ * Copyright (C) 1999-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/asm-offsets.h>
+#include <asm/sigcontext.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+/*
+ * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
+ * complications with the linker (which likes to create PLT stubs for branches
+ * to targets outside the shared object) and to avoid multi-phase kernel builds, we
+ * simply create minimalistic "patch lists" in special ELF sections.
+ */
+       .section ".data.patch.fsyscall_table", "a"
+       .previous
+#define LOAD_FSYSCALL_TABLE(reg)                       \
+[1:]   movl reg=0;                                     \
+       .xdata4 ".data.patch.fsyscall_table", 1b-.
+
+       .section ".data.patch.brl_fsys_bubble_down", "a"
+       .previous
+#define BRL_COND_FSYS_BUBBLE_DOWN(pr)                  \
+[1:](pr)brl.cond.sptk 0;                               \
+       .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
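+
+// How these patch lists get used: each .xdata4 above emits a 4-byte,
+// self-relative entry (label 1b minus the entry's own address) into the
+// named section.  At boot, ia64_patch_gate() in patch.c walks those s32
+// entries, recovers each bundle address from offp + *offp, and patches in
+// the real fsyscall_table address or fsys_bubble_down branch target.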
+
+GLOBAL_ENTRY(__kernel_syscall_via_break)
+       .prologue
+       .altrp b6
+       .body
+       /*
+        * Note: for (fast) syscall restart to work, the break instruction must be
+        *       the first one in the bundle addressed by syscall_via_break.
+        */
+{ .mib
+       break 0x100000
+       nop.i 0
+       br.ret.sptk.many b6
+}
+END(__kernel_syscall_via_break)
+
+/*
+ * On entry:
+ *     r11 = saved ar.pfs
+ *     r15 = system call #
+ *     b0  = saved return address
+ *     b6  = return address
+ * On exit:
+ *     r11 = saved ar.pfs
+ *     r15 = system call #
+ *     b0  = saved return address
+ *     all other "scratch" registers:  undefined
+ *     all "preserved" registers:      same as on entry
+ */
+
+GLOBAL_ENTRY(__kernel_syscall_via_epc)
+       .prologue
+       .altrp b6
+       .body
+{
+       /*
+        * Note: the kernel cannot assume that the first two instructions in this
+        * bundle get executed.  The remaining code must be safe even if
+        * they do not get executed.
+        */
+       adds r17=-1024,r15                      // A
+       mov r10=0                               // A    default to successful syscall execution
+       epc                                     // B    causes split-issue
+}
+       ;;
+       rsm psr.be | psr.i                      // M2 (5 cyc to srlz.d)
+       LOAD_FSYSCALL_TABLE(r14)                // X
+       ;;
+       mov r16=IA64_KR(CURRENT)                // M2 (12 cyc)
+       shladd r18=r17,3,r14                    // A
+       mov r19=NR_syscalls-1                   // A
+       ;;
+       lfetch [r18]                            // M0|1
+       mov r29=psr                             // M2 (12 cyc)
+       // If r17 is a NaT, p6 will be zero
+       cmp.geu p6,p7=r19,r17                   // A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+       ;;
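+       // The unsigned compare above folds the range check into one test:
+       // r17 = sysnr-1024 wraps to a huge unsigned value when sysnr < 1024,
+       // so p6 <=> (1024 <= sysnr < 1024+NR_syscalls) in a single cmp.geu.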
+       mov r21=ar.fpsr                         // M2 (12 cyc)
+       tnat.nz p10,p9=r15                      // I0
+       mov.i r26=ar.pfs                        // I0 (would stall anyhow due to srlz.d...)
+       ;;
+       srlz.d                                  // M0 (forces split-issue) ensure PSR.BE==0
+(p6)   ld8 r18=[r18]                           // M0|1
+       nop.i 0
+       ;;
+       nop.m 0
+(p6)   tbit.z.unc p8,p0=r18,0                  // I0 (dual-issues with "mov b7=r18"!)
+       nop.i 0
+       ;;
+(p8)   ssm psr.i
+(p6)   mov b7=r18                              // I0
+(p8)   br.dptk.many b7                         // B
+
+       mov r27=ar.rsc                          // M2 (12 cyc)
+/*
+ * brl.cond doesn't work as intended because the linker would convert this branch
+ * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
+ * future version of the linker.  In the meantime, we just use an indirect branch
+ * instead.
+ */
+#ifdef CONFIG_ITANIUM
+(p6)   add r14=-8,r14                          // r14 <- addr of fsys_bubble_down entry
+       ;;
+(p6)   ld8 r14=[r14]                           // r14 <- fsys_bubble_down
+       ;;
+(p6)   mov b7=r14
+(p6)   br.sptk.many b7
+#else
+       BRL_COND_FSYS_BUBBLE_DOWN(p6)
+#endif
+       ssm psr.i
+       mov r10=-1
+(p10)  mov r8=EINVAL
+(p9)   mov r8=ENOSYS
+       FSYS_RETURN
+END(__kernel_syscall_via_epc)
+
+#      define ARG0_OFF         (16 + IA64_SIGFRAME_ARG0_OFFSET)
+#      define ARG1_OFF         (16 + IA64_SIGFRAME_ARG1_OFFSET)
+#      define ARG2_OFF         (16 + IA64_SIGFRAME_ARG2_OFFSET)
+#      define SIGHANDLER_OFF   (16 + IA64_SIGFRAME_HANDLER_OFFSET)
+#      define SIGCONTEXT_OFF   (16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET)
+
+#      define FLAGS_OFF        IA64_SIGCONTEXT_FLAGS_OFFSET
+#      define CFM_OFF          IA64_SIGCONTEXT_CFM_OFFSET
+#      define FR6_OFF          IA64_SIGCONTEXT_FR6_OFFSET
+#      define BSP_OFF          IA64_SIGCONTEXT_AR_BSP_OFFSET
+#      define RNAT_OFF         IA64_SIGCONTEXT_AR_RNAT_OFFSET
+#      define UNAT_OFF         IA64_SIGCONTEXT_AR_UNAT_OFFSET
+#      define FPSR_OFF         IA64_SIGCONTEXT_AR_FPSR_OFFSET
+#      define PR_OFF           IA64_SIGCONTEXT_PR_OFFSET
+#      define RP_OFF           IA64_SIGCONTEXT_IP_OFFSET
+#      define SP_OFF           IA64_SIGCONTEXT_R12_OFFSET
+#      define RBS_BASE_OFF     IA64_SIGCONTEXT_RBS_BASE_OFFSET
+#      define LOADRS_OFF       IA64_SIGCONTEXT_LOADRS_OFFSET
+#      define base0            r2
+#      define base1            r3
+       /*
+        * When we get here, the memory stack looks like this:
+        *
+        *   +===============================+
+        *   |                               |
+        *   //     struct sigframe          //
+        *   |                               |
+        *   +-------------------------------+ <-- sp+16
+        *   |      16 byte of scratch       |
+        *   |            space              |
+        *   +-------------------------------+ <-- sp
+        *
+        * The register stack looks _exactly_ the way it looked at the time the signal
+        * occurred.  In other words, we're treading on a potential mine-field: each
+        * incoming general register may be a NaT value (including sp, in which case the
+        * process ends up dying with a SIGSEGV).
+        *
+        * The first thing we need to do is a cover to get the registers onto the backing
+        * store.  Once that is done, we invoke the signal handler which may modify some
+        * of the machine state.  After returning from the signal handler, we return
+        * control to the previous context by executing a sigreturn system call.  A signal
+        * handler may call the rt_sigreturn() function to directly return to a given
+        * sigcontext.  However, the user-level sigreturn() needs to do much more than
+        * calling the rt_sigreturn() system call as it needs to unwind the stack to
+        * restore preserved registers that may have been saved on the signal handler's
+        * call stack.
+        */
+
+#define SIGTRAMP_SAVES                                                                         \
+       .unwabi 3, 's';         /* mark this as a sigtramp handler (saves scratch regs) */      \
+       .unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */   \
+       .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF;                                               \
+       .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF;                                               \
+       .savesp pr, PR_OFF+SIGCONTEXT_OFF;                                                      \
+       .savesp rp, RP_OFF+SIGCONTEXT_OFF;                                                      \
+       .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF;                                                 \
+       .vframesp SP_OFF+SIGCONTEXT_OFF
+
+GLOBAL_ENTRY(__kernel_sigtramp)
+       // describe the state that is active when we get here:
+       .prologue
+       SIGTRAMP_SAVES
+       .body
+
+       .label_state 1
+
+       adds base0=SIGHANDLER_OFF,sp
+       adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp
+       br.call.sptk.many rp=1f
+1:
+       ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF)       // get pointer to signal handler's plabel
+       ld8 r15=[base1]                                 // get address of new RBS base (or NULL)
+       cover                           // push args in interrupted frame onto backing store
+       ;;
+       cmp.ne p1,p0=r15,r0             // do we need to switch rbs? (note: pr is saved by kernel)
+       mov.m r9=ar.bsp                 // fetch ar.bsp
+       .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
+(p1)   br.cond.spnt setup_rbs          // yup -> (clobbers p8, r14-r16, and r18-r20)
+back_from_setup_rbs:
+       alloc r8=ar.pfs,0,0,3,0
+       ld8 out0=[base0],16             // load arg0 (signum)
+       adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1
+       ;;
+       ld8 out1=[base1]                // load arg1 (siginfop)
+       ld8 r10=[r17],8                 // get signal handler entry point
+       ;;
+       ld8 out2=[base0]                // load arg2 (sigcontextp)
+       ld8 gp=[r17]                    // get signal handler's global pointer
+       adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
+       ;;
+       .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
+       st8 [base0]=r9                  // save sc_ar_bsp
+       adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
+       adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
+       ;;
+       stf.spill [base0]=f6,32
+       stf.spill [base1]=f7,32
+       ;;
+       stf.spill [base0]=f8,32
+       stf.spill [base1]=f9,32
+       mov b6=r10
+       ;;
+       stf.spill [base0]=f10,32
+       stf.spill [base1]=f11,32
+       ;;
+       stf.spill [base0]=f12,32
+       stf.spill [base1]=f13,32
+       ;;
+       stf.spill [base0]=f14,32
+       stf.spill [base1]=f15,32
+       br.call.sptk.many rp=b6                 // call the signal handler
+.ret0: adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
+       ;;
+       ld8 r15=[base0]                         // fetch sc_ar_bsp
+       mov r14=ar.bsp
+       ;;
+       cmp.ne p1,p0=r14,r15                    // do we need to restore the rbs?
+(p1)   br.cond.spnt restore_rbs                // yup -> (clobbers r14-r18, f6 & f7)
+       ;;
+back_from_restore_rbs:
+       adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
+       adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
+       ;;
+       ldf.fill f6=[base0],32
+       ldf.fill f7=[base1],32
+       ;;
+       ldf.fill f8=[base0],32
+       ldf.fill f9=[base1],32
+       ;;
+       ldf.fill f10=[base0],32
+       ldf.fill f11=[base1],32
+       ;;
+       ldf.fill f12=[base0],32
+       ldf.fill f13=[base1],32
+       ;;
+       ldf.fill f14=[base0],32
+       ldf.fill f15=[base1],32
+       mov r15=__NR_rt_sigreturn
+       .restore sp                             // pop .prologue
+       break __BREAK_SYSCALL
+
+       .prologue
+       SIGTRAMP_SAVES
+setup_rbs:
+       mov ar.rsc=0                            // put RSE into enforced lazy mode
+       ;;
+       .save ar.rnat, r19
+       mov r19=ar.rnat                         // save RNaT before switching backing store area
+       adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp
+
+       mov r18=ar.bspstore
+       mov ar.bspstore=r15                     // switch over to new register backing store area
+       ;;
+
+       .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
+       st8 [r14]=r19                           // save sc_ar_rnat
+       .body
+       mov.m r16=ar.bsp                        // sc_loadrs <- (new bsp - new bspstore) << 16
+       adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp
+       ;;
+       invala
+       sub r15=r16,r15
+       extr.u r20=r18,3,6
+       ;;
+       mov ar.rsc=0xf                          // set RSE into eager mode, pl 3
+       cmp.eq p8,p0=63,r20
+       shl r15=r15,16
+       ;;
+       st8 [r14]=r15                           // save sc_loadrs
+(p8)   st8 [r18]=r19           // if bspstore points at RNaT slot, store RNaT there now
+       .restore sp                             // pop .prologue
+       br.cond.sptk back_from_setup_rbs
+
+       .prologue
+       SIGTRAMP_SAVES
+       .spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
+       .body
+restore_rbs:
+       // On input:
+       //      r14 = bsp1 (bsp at the time of return from signal handler)
+       //      r15 = bsp0 (bsp at the time the signal occurred)
+       //
+       // Here, we need to calculate bspstore0, the value that ar.bspstore needs
+       // to be set to, based on bsp0 and the size of the dirty partition on
+       // the alternate stack (sc_loadrs >> 16).  This can be done with the
+       // following algorithm:
+       //
+       //  bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1));
+       //
+       // This is what the code below does.
+       //
+       alloc r2=ar.pfs,0,0,0,0                 // alloc null frame
+       adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp
+       adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp
+       ;;
+       ld8 r17=[r16]
+       ld8 r16=[r18]                   // get new rnat
+       extr.u r18=r15,3,6      // r18 <- rse_slot_num(bsp0)
+       ;;
+       mov ar.rsc=r17                  // put RSE into enforced lazy mode
+       shr.u r17=r17,16
+       ;;
+       sub r14=r14,r17         // r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16)
+       shr.u r17=r17,3         // r17 <- (sc_loadrs >> 19)
+       ;;
+       loadrs                  // restore dirty partition
+       extr.u r14=r14,3,6      // r14 <- rse_slot_num(bspstore1)
+       ;;
+       add r14=r14,r17         // r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19)
+       ;;
+       shr.u r14=r14,6         // r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40
+       ;;
+       sub r14=r14,r17         // r14 <- -rse_num_regs(bspstore1, bsp1)
+       movl r17=0x8208208208208209
+       ;;
+       add r18=r18,r14         // r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1)
+       setf.sig f7=r17
+       cmp.lt p7,p0=r14,r0     // p7 <- (r14 < 0)?
+       ;;
+(p7)   adds r18=-62,r18        // delta -= 62
+       ;;
+       setf.sig f6=r18
+       ;;
+       xmpy.h f6=f6,f7
+       ;;
+       getf.sig r17=f6
+       ;;
+       add r17=r17,r18
+       shr r18=r18,63
+       ;;
+       shr r17=r17,5
+       ;;
+       sub r17=r17,r18         // r17 = delta/63
+       ;;
+       add r17=r14,r17         // r17 <- delta/63 - rse_num_regs(bspstore1, bsp1)
+       ;;
+       shladd r15=r17,3,r15    // r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1))
+       ;;
+       mov ar.bspstore=r15                     // switch back to old register backing store area
+       ;;
+       mov ar.rnat=r16                         // restore RNaT
+       mov ar.rsc=0xf                          // (will be restored later on from sc_ar_rsc)
+       // invala not necessary as that will happen when returning to user-mode
+       br.cond.sptk back_from_restore_rbs
+END(__kernel_sigtramp)
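
A note on the restore_rbs arithmetic above: the movl r17=0x8208208208208209 /
xmpy.h / shift sequence is the standard signed magic-number division,
computing delta/63 without a divide instruction.  A minimal host-side C
sketch of the same instruction sequence, assuming a gcc/clang-style __int128
on a 64-bit host (the name div63 is illustrative, not from the patch):

        #include <assert.h>
        #include <stdint.h>

        /* Mirror of the gate.S sequence: the high 64 bits of a signed
           multiply by the magic constant, plus the add/shift fixups,
           yield delta/63. */
        static int64_t div63(int64_t delta)
        {
                const int64_t magic = (int64_t)0x8208208208208209ULL; /* negative */
                int64_t hi = (int64_t)(((__int128)delta * magic) >> 64); /* xmpy.h */
                hi += delta;                  /* add r17=r17,r18 */
                int64_t sign = delta >> 63;   /* shr r18=r18,63 -> 0 or -1 */
                return (hi >> 5) - sign;      /* shr r17=r17,5; sub r17=r17,r18 */
        }

        int main(void)
        {
                /* The slot deltas handled in restore_rbs are non-negative. */
                for (int64_t d = 0; d < (1 << 20); d++)
                        assert(div63(d) == d / 63);
                return 0;
        }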
diff -r 70ee75d5c12c -r 614deef19299 linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S  Mon Jul 24 13:04:40 2006 -0600
@@ -0,0 +1,95 @@
+/*
+ * Linker script for gate DSO.  The gate pages are an ELF shared object prelinked to its
+ * virtual address, with only one read-only segment and one execute-only segment (both fit
+ * in one page).  This script controls its layout.
+ */
+
+#include <linux/config.h>
+
+#include <asm/system.h>
+
+SECTIONS
+{
+  . = GATE_ADDR + SIZEOF_HEADERS;
+
+  .hash                                : { *(.hash) }                          :readable
+  .dynsym                      : { *(.dynsym) }
+  .dynstr                      : { *(.dynstr) }
+  .gnu.version                 : { *(.gnu.version) }
+  .gnu.version_d               : { *(.gnu.version_d) }
+  .gnu.version_r               : { *(.gnu.version_r) }
+  .dynamic                     : { *(.dynamic) }                       :readable :dynamic
+
+  /*
+   * This linker script is used both with -r and with -shared.  For the layouts to match,
+   * we need to skip more than enough space for the dynamic symbol table et al.  If this
+   * amount is insufficient, ld -shared will barf.  Just increase it here.
+   */
+  . = GATE_ADDR + 0x500;
+
+  .data.patch                  : {
+                                   __start_gate_mckinley_e9_patchlist = .;
+                                   *(.data.patch.mckinley_e9)
+                                   __end_gate_mckinley_e9_patchlist = .;
+
+                                   __start_gate_vtop_patchlist = .;
+                                   *(.data.patch.vtop)
+                                   __end_gate_vtop_patchlist = .;
+
+                                   __start_gate_fsyscall_patchlist = .;
+                                   *(.data.patch.fsyscall_table)
+                                   __end_gate_fsyscall_patchlist = .;
+
+                                   __start_gate_brl_fsys_bubble_down_patchlist = .;
+                                   *(.data.patch.brl_fsys_bubble_down)
+                                   __end_gate_brl_fsys_bubble_down_patchlist = .;
+  }                                                                    :readable
+  .IA_64.unwind_info           : { *(.IA_64.unwind_info*) }
+  .IA_64.unwind                        : { *(.IA_64.unwind*) }                 :readable :unwind
+#ifdef HAVE_BUGGY_SEGREL
+  .text (GATE_ADDR + PAGE_SIZE)        : { *(.text) *(.text.*) }               :readable
+#else
+  . = ALIGN (PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1));
+  .text                                : { *(.text) *(.text.*) }               :epc
+#endif
+
+  /DISCARD/                    : {
+       *(.got.plt) *(.got)
+       *(.data .data.* .gnu.linkonce.d.*)
+       *(.dynbss)
+       *(.bss .bss.* .gnu.linkonce.b.*)
+       *(__ex_table)
+  }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+  readable  PT_LOAD    FILEHDR PHDRS   FLAGS(4);       /* PF_R */
+#ifndef HAVE_BUGGY_SEGREL
+  epc      PT_LOAD     FILEHDR PHDRS   FLAGS(1);       /* PF_X */
+#endif
+  dynamic   PT_DYNAMIC                 FLAGS(4);       /* PF_R */
+  unwind    0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+  LINUX_2.5 {
+    global:
+       __kernel_syscall_via_break;
+       __kernel_syscall_via_epc;
+       __kernel_sigtramp;
+
+    local: *;
+  };
+}
+
+/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+ENTRY(__kernel_syscall_via_epc)
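
The ENTRY() line above is what the preceding comment refers to: on kernels
that pass AT_SYSINFO in the ELF aux vector, the gate DSO's entry point
becomes visible to every process.  As a small illustration, assuming glibc
2.16+ for getauxval() (AT_SYSINFO is not provided on every architecture):

        #include <stdio.h>
        #include <sys/auxv.h>   /* getauxval(); pulls in the AT_* constants */

        int main(void)
        {
                unsigned long entry = getauxval(AT_SYSINFO);
                if (entry)
                        printf("AT_SYSINFO (gate DSO entry): %#lx\n", entry);
                else
                        puts("AT_SYSINFO not provided by this kernel/arch");
                return 0;
        }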
diff -r 70ee75d5c12c -r 614deef19299 linux-2.6-xen-sparse/arch/ia64/kernel/patch.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c     Mon Jul 24 13:04:40 2006 -0600
@@ -0,0 +1,197 @@
+/*
+ * Instruction-patching support.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/patch.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+/*
+ * This was adapted from code written by Tony Luck:
+ *
+ * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
+ * like this:
+ *
+ * 6  6         5         4         3         2         1
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
+ *
+ * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
+ */
+static u64
+get_imm64 (u64 insn_addr)
+{
+       u64 *p = (u64 *) (insn_addr & -16);     /* mask out slot number */
+
+       return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
+               ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
+               ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
+               ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
+               ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
+               ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
+               ((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
+}
+
+/* Patch instruction with "val" where "mask" has 1 bits. */
+void
+ia64_patch (u64 insn_addr, u64 mask, u64 val)
+{
+       u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
+#      define insn_mask ((1UL << 41) - 1)
+       unsigned long shift;
+
+       b0 = b[0]; b1 = b[1];
+       shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
+       if (shift >= 64) {
+               m1 = mask << (shift - 64);
+               v1 = val << (shift - 64);
+       } else {
+               m0 = mask << shift; m1 = mask >> (64 - shift);
+               v0 = val  << shift; v1 = val >> (64 - shift);
+               b[0] = (b0 & ~m0) | (v0 & m0);
+       }
+       b[1] = (b1 & ~m1) | (v1 & m1);
+}
+
+void
+ia64_patch_imm64 (u64 insn_addr, u64 val)
+{
+       /* The assembler may generate offset pointing to either slot 1
+          or slot 2 for a long (2-slot) instruction, occupying slots 1
+          and 2.  */
+       insn_addr &= -16UL;
+       ia64_patch(insn_addr + 2,
+                  0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
+                                    | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
+                                    | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
+                                    | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
+                                    | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
+       ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
+}
+
+void
+ia64_patch_imm60 (u64 insn_addr, u64 val)
+{
+       /* The assembler may generate offset pointing to either slot 1
+          or slot 2 for a long (2-slot) instruction, occupying slots 1
+          and 2.  */
+       insn_addr &= -16UL;
+       ia64_patch(insn_addr + 2,
+                  0x011ffffe000UL, (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
+                                    | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
+       ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
+}
+
+/*
+ * We sometimes need to load the physical address of a kernel
+ * object.  Often we can convert the virtual address to physical
+ * at execution time, but sometimes (either for performance reasons
+ * or during error recovery) we cannot do this.  Patch the marked
+ * bundles to load the physical address.
+ */
+void __init
+ia64_patch_vtop (unsigned long start, unsigned long end)
+{
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) offp + *offp;
+
+               /* replace virtual address with corresponding physical address: */
+               ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+void
+ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
+{
+       static int first_time = 1;
+       int need_workaround;
+       s32 *offp = (s32 *) start;
+       u64 *wp;
+
+       need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);
+
+       if (first_time) {
+               first_time = 0;
+               if (need_workaround)
+                       printk(KERN_INFO "Leaving McKinley Errata 9 workaround 
enabled\n");
+               else
+                       printk(KERN_INFO "McKinley Errata 9 workaround not 
needed; "
+                              "disabling it\n");
+       }
+       if (need_workaround)
+               return;
+
+       while (offp < (s32 *) end) {
+               wp = (u64 *) ia64_imva((char *) offp + *offp);
+               wp[0] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
+               wp[1] = 0x0004000000000200UL;
+               wp[2] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
+               wp[3] = 0x0084006880000200UL;
+               ia64_fc(wp); ia64_fc(wp + 2);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+static void
+patch_fsyscall_table (unsigned long start, unsigned long end)
+{
+       extern unsigned long fsyscall_table[NR_syscalls];
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) ia64_imva((char *) offp + *offp);
+               ia64_patch_imm64(ip, (u64) fsyscall_table);
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+static void
+patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
+{
+       extern char fsys_bubble_down[];
+       s32 *offp = (s32 *) start;
+       u64 ip;
+
+       while (offp < (s32 *) end) {
+               ip = (u64) offp + *offp;
+               ia64_patch_imm60((u64) ia64_imva((void *) ip),
+                                (u64) (fsys_bubble_down - (ip & -16)) / 16);
+               ia64_fc((void *) ip);
+               ++offp;
+       }
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+void
+ia64_patch_gate (void)
+{
+#      define START(name)      ((unsigned long) __start_gate_##name##_patchlist)
+#      define END(name)        ((unsigned long) __end_gate_##name##_patchlist)
+
+       patch_fsyscall_table(START(fsyscall), END(fsyscall));
+       patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
+       ia64_patch_vtop(START(vtop), END(vtop));
+       ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
+}
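
For what it's worth, get_imm64() and ia64_patch_imm64() above are exact
inverses over the movl bit fields; that is what lets ia64_patch_vtop() read
an immediate, run it through ia64_tpa(), and write it back.  The property can
be checked on any 64-bit host with a sketch like the following, which copies
the functions minus the kernel headers (the test harness itself is
illustrative, not part of the patch):

        #include <assert.h>
        #include <stdint.h>

        typedef uint64_t u64;

        static u64 get_imm64(u64 insn_addr)
        {
                u64 *p = (u64 *) (insn_addr & -16);     /* mask out slot number */

                return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
                        ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
                        ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
                        ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
                        ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
                        ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
                        ((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
        }

        static void ia64_patch(u64 insn_addr, u64 mask, u64 val)
        {
                u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
                unsigned long shift;

                b0 = b[0]; b1 = b[1];
                shift = 5 + 41 * (insn_addr % 16); /* template, then 3 x 41-bit slots */
                if (shift >= 64) {
                        m1 = mask << (shift - 64);
                        v1 = val << (shift - 64);
                } else {
                        m0 = mask << shift; m1 = mask >> (64 - shift);
                        v0 = val  << shift; v1 = val >> (64 - shift);
                        b[0] = (b0 & ~m0) | (v0 & m0);
                }
                b[1] = (b1 & ~m1) | (v1 & m1);
        }

        static void ia64_patch_imm64(u64 insn_addr, u64 val)
        {
                insn_addr &= -16UL;
                ia64_patch(insn_addr + 2,
                           0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27)
                                             | ((val & 0x0000000000200000UL) <<  0)
                                             | ((val & 0x00000000001f0000UL) <<  6)
                                             | ((val & 0x000000000000ff80UL) << 20)
                                             | ((val & 0x000000000000007fUL) << 13)));
                ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
        }

        int main(void)
        {
                /* A fake, zeroed bundle standing in for a movl patch site. */
                u64 bundle[2] __attribute__((aligned(16))) = { 0, 0 };
                u64 val = 0xdeadbeefcafef00dUL;

                ia64_patch_imm64((u64) bundle, val);
                assert(get_imm64((u64) bundle) == val);
                return 0;
        }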

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] [IA64] vDSO paravirtualization: import linux files, Xen patchbot-unstable <=