[PATCH 3/3] x86/entry: Make intra-function symbols properly local
Each of these symbols is local to its main function. Keeping them out of the
symbol table lets livepatch's binary diffing logic reason about the functions
properly.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
sysenter_eflags_saved() is an open question. It does need to be globally
visible, and I think any livepatch modifying sysenter_entry() will need a
manual adjustment to do_debug() to update the reference there.
---
xen/arch/x86/x86_64/compat/entry.S | 20 ++++++++++----------
xen/arch/x86/x86_64/entry.S | 22 +++++++++++-----------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 4fbd89cea1a9..1e9e0455eaf3 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -41,7 +41,7 @@ FUNC(compat_test_all_events)
shll $IRQSTAT_shift,%eax
leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
cmpl $0,(%rcx,%rax,1)
- jne compat_process_softirqs
+ jne .L_compat_process_softirqs
/* Inject exception if pending. */
lea VCPU_trap_bounce(%rbx), %rdx
@@ -49,11 +49,11 @@ FUNC(compat_test_all_events)
jnz .Lcompat_process_trapbounce
cmpb $0, VCPU_mce_pending(%rbx)
- jne compat_process_mce
+ jne .L_compat_process_mce
.Lcompat_test_guest_nmi:
cmpb $0, VCPU_nmi_pending(%rbx)
- jne compat_process_nmi
-compat_test_guest_events:
+ jne .L_compat_process_nmi
+.L_compat_test_guest_events:
movq VCPU_vcpu_info(%rbx),%rax
movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
decl %eax
@@ -71,7 +71,7 @@ compat_test_guest_events:
jmp compat_test_all_events
/* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_softirqs)
+LABEL_LOCAL(.L_compat_process_softirqs)
sti
call do_softirq
jmp compat_test_all_events
@@ -84,7 +84,7 @@ LABEL_LOCAL(.Lcompat_process_trapbounce)
jmp compat_test_all_events
/* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_mce)
+LABEL_LOCAL(.L_compat_process_mce)
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
jnz .Lcompat_test_guest_nmi
sti
@@ -96,12 +96,12 @@ LABEL_LOCAL(compat_process_mce)
movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
orl $1 << VCPU_TRAP_MCE,%edx
movb %dl,VCPU_async_exception_mask(%rbx)
- jmp compat_process_trap
+ jmp .L_compat_process_trap
/* %rbx: struct vcpu */
-LABEL_LOCAL(compat_process_nmi)
+LABEL_LOCAL(.L_compat_process_nmi)
testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
- jnz compat_test_guest_events
+ jnz .L_compat_test_guest_events
sti
movb $0, VCPU_nmi_pending(%rbx)
call set_guest_nmi_trapbounce
@@ -112,7 +112,7 @@ LABEL_LOCAL(compat_process_nmi)
orl $1 << VCPU_TRAP_NMI,%edx
movb %dl,VCPU_async_exception_mask(%rbx)
/* FALLTHROUGH */
-compat_process_trap:
+.L_compat_process_trap:
leaq VCPU_trap_bounce(%rbx),%rdx
call compat_create_bounce_frame
jmp compat_test_all_events
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index fc64ef1fd460..130462ba0e1a 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -188,7 +188,7 @@ FUNC_LOCAL(restore_all_guest)
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
- jz iret_exit_to_guest
+ jz .L_iret_exit_to_guest
movq 24(%rsp),%r11 # RFLAGS
andq $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
@@ -220,7 +220,7 @@ FUNC_LOCAL(restore_all_guest)
LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
movq 8(%rsp), %rcx # RIP
/* No special register assumptions. */
-iret_exit_to_guest:
+.L_iret_exit_to_guest:
andl $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
orl $X86_EFLAGS_IF,24(%rsp)
addq $8,%rsp
@@ -432,10 +432,10 @@ UNLIKELY_END(msi_check)
cmove %rdi, %rdx
test %rdx, %rdx
- jz int80_slow_path
+ jz .L_int80_slow_path
#else
test %rdi, %rdi
- jz int80_slow_path
+ jz .L_int80_slow_path
#endif
/* Construct trap_bounce from trap_ctxt[0x80]. */
@@ -457,7 +457,7 @@ UNLIKELY_END(msi_check)
call create_bounce_frame
jmp test_all_events
-int80_slow_path:
+.L_int80_slow_path:
/*
* Setup entry vector and error code as if this was a GPF caused by an
* IDT entry with DPL==0.
@@ -472,7 +472,7 @@ int80_slow_path:
* need to set up %r14 here, while %r15 is required to still be zero.
*/
GET_STACK_END(14)
- jmp handle_exception_saved
+ jmp .L_handle_exception_saved
END(entry_int80)
/* create_bounce_frame & helpers don't need to be in .text.entry */
@@ -750,10 +750,10 @@ FUNC(handle_exception, 0)
cmovnz %r12d, %r13d
.Lxcpt_cr3_okay:
-handle_exception_saved:
+.L_handle_exception_saved:
GET_CURRENT(bx)
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
- jz exception_with_ints_disabled
+ jz .L_exception_with_ints_disabled
#if defined(CONFIG_PV32)
ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
@@ -859,9 +859,9 @@ handle_exception_saved:
#endif
/* No special register assumptions. */
-exception_with_ints_disabled:
+.L_exception_with_ints_disabled:
testb $3,UREGS_cs(%rsp) # interrupts disabled outside Xen?
- jnz FATAL_exception_with_ints_disabled
+ jnz .L_FATAL_exception_with_ints_disabled
movq %rsp,%rdi
call search_pre_exception_table
testq %rax,%rax # no fixup code for faulting EIP?
@@ -891,7 +891,7 @@ exception_with_ints_disabled:
jmp restore_all_xen # return to fixup code
/* No special register assumptions. */
-FATAL_exception_with_ints_disabled:
+.L_FATAL_exception_with_ints_disabled:
xorl %esi,%esi
movq %rsp,%rdi
tailcall fatal_trap
--
2.30.2