[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC v2 08/23] x86/entry/64: Adapt assembly for PIE support



Change the assembly code to use only relative references to symbols so that the
kernel can be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below the -2G memory limit.

Signed-off-by: Thomas Garnier <thgarnie@xxxxxxxxxx>
---
 arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index daf8936d0628..a3967a2af6ec 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -199,12 +199,15 @@ entry_SYSCALL_64_fastpath:
        ja      1f                              /* return -ENOSYS (already in 
pt_regs->ax) */
        movq    %r10, %rcx
 
+       /* Ensures the call is position independent */
+       leaq    sys_call_table(%rip), %r11
+
        /*
         * This call instruction is handled specially in stub_ptregs_64.
         * It might end up jumping to the slow path.  If it jumps, RAX
         * and all argument registers are clobbered.
         */
-       call    *sys_call_table(, %rax, 8)
+       call    *(%r11, %rax, 8)
 .Lentry_SYSCALL_64_after_fastpath_call:
 
        movq    %rax, RAX(%rsp)
@@ -339,7 +342,8 @@ ENTRY(stub_ptregs_64)
         * RAX stores a pointer to the C function implementing the syscall.
         * IRQs are on.
         */
-       cmpq    $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+       leaq    .Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
+       cmpq    %r11, (%rsp)
        jne     1f
 
        /*
@@ -1210,7 +1214,8 @@ ENTRY(error_entry)
        movl    %ecx, %eax                      /* zero extend */
        cmpq    %rax, RIP+8(%rsp)
        je      .Lbstep_iret
-       cmpq    $.Lgs_change, RIP+8(%rsp)
+       leaq    .Lgs_change(%rip), %rcx
+       cmpq    %rcx, RIP+8(%rsp)
        jne     .Lerror_entry_done
 
        /*
@@ -1430,10 +1435,10 @@ ENTRY(nmi)
         * resume the outer NMI.
         */
 
-       movq    $repeat_nmi, %rdx
+       leaq    repeat_nmi(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      1f
-       movq    $end_repeat_nmi, %rdx
+       leaq    end_repeat_nmi(%rip), %rdx
        cmpq    8(%rsp), %rdx
        ja      nested_nmi_out
 1:
@@ -1487,7 +1492,8 @@ nested_nmi:
        pushq   %rdx
        pushfq
        pushq   $__KERNEL_CS
-       pushq   $repeat_nmi
+       leaq    repeat_nmi(%rip), %rdx
+       pushq   %rdx
 
        /* Put stack back */
        addq    $(6*8), %rsp
@@ -1526,7 +1532,9 @@ first_nmi:
        addq    $8, (%rsp)      /* Fix up RSP */
        pushfq                  /* RFLAGS */
        pushq   $__KERNEL_CS    /* CS */
-       pushq   $1f             /* RIP */
+       pushq   %rax            /* Support Position Independent Code */
+       leaq    1f(%rip), %rax  /* RIP */
+       xchgq   %rax, (%rsp)    /* Restore RAX, put 1f */
        INTERRUPT_RETURN        /* continues at repeat_nmi below */
        UNWIND_HINT_IRET_REGS
 1:
-- 
2.14.0.434.g98096fd7a8-goog


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.