Re: [Xen-devel] [PATCH v6 1/7] xen/arm: Introduce alternative runtime patching
On Wed, Jul 27, 2016 at 05:37:05PM +0100, Julien Grall wrote:
> Some of the processor errata will require modifying code sequences.
> As those modifications may impact performance, they should only
> be enabled on affected cores. Furthermore, Xen may also want to take
> advantage of new hardware features coming with v8.1 and v8.2.
> 
> This patch adds an infrastructure to patch Xen at boot time
> depending on the "features" available on the platform.
> 
> This code is based on the file arch/arm64/kernel/alternative.c in
> Linux 4.6-rc3. Any references to arm64 have been dropped to make the
> code as generic as possible.
> 
> When Xen is creating the page tables, all the executable sections
> (.text and .init.text) will be marked read-only, which is then enforced
> by setting SCTLR.WXN.
> 
> Whilst it might be possible to mark those entries read-only after
> Xen has been patched, we would need extra care to avoid possible
> TLB conflicts (see D4-1732 in ARM DDI 0487A.i) as all
> physical CPUs will be running.
> 
> All the physical CPUs have to be brought up before patching Xen because
> each core may have different errata/features which require code
> patching. The only way to find them is to probe system registers on
> each CPU.
> 
> To avoid extra complexity, it is possible to create a temporary
> writeable mapping with vmap. This mapping will be used to write the
> new instructions.
> 
> Lastly, runtime patching is currently not necessary for ARM32. So the
> code is only enabled for ARM64.
> 
> Note that the header asm-arm/alternative.h is a verbatim copy of the
> Linux one (arch/arm64/include/asm/alternative.h). It may contain
> inaccurate comments, but I did not touch them for now.
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> 
> ---
>     Changes in v6:
>         - Update some messages
>         - Constify some structure
>         - Add an ASSERT in apply_alternatives

apply_alternatives_all actually.

>         - Mention in the commit message that alternative is a verbatim
>           copy of the Linux headers.

Thank you. With that:

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

> 
>     Changes in v5:
>         - Rebase on the latest staging
> 
>     Changes in v4:
>         - Fix build when ALTERNATIVE is not enabled
>         - Fix typo
>         - Move .altinstructions in init.data
> 
>     Changes in v3:
>         - .altinstr_replacement should live in init.text
>         - Add a comment to explain when apply_alternatives_all should be
>           called.
> 
>     Changes in v2:
>         - Use hard tabs in Kconfig
>         - Update the copyright year
>         - Update the commit message to explain the extra mapping
> ---
>  xen/arch/arm/Kconfig              |   5 +
>  xen/arch/arm/Makefile             |   1 +
>  xen/arch/arm/alternative.c        | 224 ++++++++++++++++++++++++++++++++++++++
>  xen/arch/arm/setup.c              |   7 ++
>  xen/arch/arm/xen.lds.S            |   9 ++
>  xen/include/asm-arm/alternative.h | 165 ++++++++++++++++++++++++++++
>  xen/include/asm-arm/arm64/insn.h  |  16 +++
>  7 files changed, 427 insertions(+)
>  create mode 100644 xen/arch/arm/alternative.c
>  create mode 100644 xen/include/asm-arm/alternative.h
> 
> diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
> index 6231cd5..0141bd9 100644
> --- a/xen/arch/arm/Kconfig
> +++ b/xen/arch/arm/Kconfig
> @@ -14,6 +14,7 @@ config ARM_64
>  	def_bool y
>  	depends on 64BIT
>  	select HAS_GICV3
> +	select ALTERNATIVE
>  
>  config ARM
>  	def_bool y
> @@ -46,6 +47,10 @@ config ACPI
>  config HAS_GICV3
>  	bool
>  
> +# Select ALTERNATIVE if the architecture supports runtime patching
> +config ALTERNATIVE
> +	bool
> +
>  endmenu
>  
>  source "common/Kconfig"
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index b264ed4..74bd7b8 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -4,6 +4,7 @@ subdir-y += platforms
>  subdir-$(CONFIG_ARM_64) += efi
>  subdir-$(CONFIG_ACPI) += acpi
>  
> +obj-$(CONFIG_ALTERNATIVE) += alternative.o
>  obj-y += bootfdt.o
>  obj-y += cpu.o
>  obj-y += cpufeature.o
> diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
> new file mode 100644
> index 0000000..8ee5a11
> --- /dev/null
> +++ b/xen/arch/arm/alternative.c
> @@ -0,0 +1,224 @@
> +/*
> + * alternative runtime patching
> + * inspired by the x86 version
> + *
> + * Copyright (C) 2014-2016 ARM Ltd.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <xen/config.h>
> +#include <xen/init.h>
> +#include <xen/types.h>
> +#include <xen/kernel.h>
> +#include <xen/mm.h>
> +#include <xen/vmap.h>
> +#include <xen/smp.h>
> +#include <xen/stop_machine.h>
> +#include <asm/alternative.h>
> +#include <asm/atomic.h>
> +#include <asm/byteorder.h>
> +#include <asm/cpufeature.h>
> +#include <asm/insn.h>
> +#include <asm/page.h>
> +
> +#define __ALT_PTR(a,f)      (u32 *)((void *)&(a)->f + (a)->f)
> +#define ALT_ORIG_PTR(a)     __ALT_PTR(a, orig_offset)
> +#define ALT_REPL_PTR(a)     __ALT_PTR(a, alt_offset)
> +
> +extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
> +
> +struct alt_region {
> +    const struct alt_instr *begin;
> +    const struct alt_instr *end;
> +};
> +
> +/*
> + * Check if the target PC is within an alternative block.
> + */
> +static bool_t branch_insn_requires_update(const struct alt_instr *alt,
> +                                          unsigned long pc)
> +{
> +    unsigned long replptr;
> +
> +    if ( is_active_kernel_text(pc) )
> +        return 1;
> +
> +    replptr = (unsigned long)ALT_REPL_PTR(alt);
> +    if ( pc >= replptr && pc <= (replptr + alt->alt_len) )
> +        return 0;
> +
> +    /*
> +     * Branching into *another* alternate sequence is doomed, and
> +     * we're not even trying to fix it up.
> +     */
> +    BUG();
> +}
> +
> +static u32 get_alt_insn(const struct alt_instr *alt,
> +                        const u32 *insnptr, const u32 *altinsnptr)
> +{
> +    u32 insn;
> +
> +    insn = le32_to_cpu(*altinsnptr);
> +
> +    if ( insn_is_branch_imm(insn) )
> +    {
> +        s32 offset = insn_get_branch_offset(insn);
> +        unsigned long target;
> +
> +        target = (unsigned long)altinsnptr + offset;
> +
> +        /*
> +         * If we're branching inside the alternate sequence,
> +         * do not rewrite the instruction, as it is already
> +         * correct. Otherwise, generate the new instruction.
> +         */
> +        if ( branch_insn_requires_update(alt, target) )
> +        {
> +            offset = target - (unsigned long)insnptr;
> +            insn = insn_set_branch_offset(insn, offset);
> +        }
> +    }
> +
> +    return insn;
> +}
> +
> +static int __apply_alternatives(const struct alt_region *region)
> +{
> +    const struct alt_instr *alt;
> +    const u32 *origptr, *replptr;
> +    u32 *writeptr, *writemap;
> +    mfn_t text_mfn = _mfn(virt_to_mfn(_stext));
> +    unsigned int text_order = get_order_from_bytes(_end - _start);
> +
> +    printk(XENLOG_INFO "alternatives: Patching kernel code\n");
> +
> +    /*
> +     * The text section is read-only. So re-map Xen to be able to patch
> +     * the code.
> +     */
> +    writemap = __vmap(&text_mfn, 1 << text_order, 1, 1, PAGE_HYPERVISOR,
> +                      VMAP_DEFAULT);
> +    if ( !writemap )
> +    {
> +        printk(XENLOG_ERR "alternatives: Unable to map the text section (size %u)\n",
> +               1 << text_order);
> +        return -ENOMEM;
> +    }
> +
> +    for ( alt = region->begin; alt < region->end; alt++ )
> +    {
> +        u32 insn;
> +        int i, nr_inst;
> +
> +        if ( !cpus_have_cap(alt->cpufeature) )
> +            continue;
> +
> +        BUG_ON(alt->alt_len != alt->orig_len);
> +
> +        origptr = ALT_ORIG_PTR(alt);
> +        writeptr = origptr - (u32 *)_start + writemap;
> +        replptr = ALT_REPL_PTR(alt);
> +
> +        nr_inst = alt->alt_len / sizeof(insn);
> +
> +        for ( i = 0; i < nr_inst; i++ )
> +        {
> +            insn = get_alt_insn(alt, origptr + i, replptr + i);
> +            *(writeptr + i) = cpu_to_le32(insn);
> +        }
> +
> +        /* Ensure the new instructions reached the memory and nuke */
> +        clean_and_invalidate_dcache_va_range(writeptr,
> +                                             (sizeof (*writeptr) * nr_inst));
> +    }
> +
> +    /* Nuke the instruction cache */
> +    invalidate_icache();
> +
> +    vunmap(writemap);
> +
> +    return 0;
> +}
> +
> +/*
> + * We might be patching the stop_machine state machine, so implement a
> + * really simple polling protocol here.
> + */
> +static int __apply_alternatives_multi_stop(void *unused)
> +{
> +    static int patched = 0;
> +    const struct alt_region region = {
> +        .begin = __alt_instructions,
> +        .end = __alt_instructions_end,
> +    };
> +
> +    /* We always have a CPU 0 at this point (__init) */
> +    if ( smp_processor_id() )
> +    {
> +        while ( !read_atomic(&patched) )
> +            cpu_relax();
> +        isb();
> +    }
> +    else
> +    {
> +        int ret;
> +
> +        BUG_ON(patched);
> +        ret = __apply_alternatives(&region);
> +        /* The patching is not expected to fail during boot. */
> +        BUG_ON(ret != 0);
> +
> +        /* Barriers provided by the cache flushing */
> +        write_atomic(&patched, 1);
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * This function should only be called during boot and before CPU0 jump
> + * into the idle_loop.
> + */
> +void __init apply_alternatives_all(void)
> +{
> +    int ret;
> +
> +    ASSERT(system_state != SYS_STATE_active);
> +
> +    /* better not try code patching on a live SMP system */
> +    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);
> +
> +    /* stop_machine_run should never fail at this stage of the boot */
> +    BUG_ON(ret);
> +}
> +
> +int apply_alternatives(void *start, size_t length)
> +{
> +    const struct alt_region region = {
> +        .begin = start,
> +        .end = start + length,
> +    };
> +
> +    return __apply_alternatives(&region);
> +}
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index 46cf6dd..97b3214 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -38,6 +38,7 @@
>  #include <xen/vmap.h>
>  #include <xen/libfdt/libfdt.h>
>  #include <xen/acpi.h>
> +#include <asm/alternative.h>
>  #include <asm/page.h>
>  #include <asm/current.h>
>  #include <asm/setup.h>
> @@ -835,6 +836,12 @@ void __init start_xen(unsigned long boot_phys_offset,
>  
>      do_initcalls();
>  
> +    /*
> +     * It needs to be called after do_initcalls to be able to use
> +     * stop_machine (tasklets initialized via an initcall).
> +     */
> +    apply_alternatives_all();
> +
>      /* Create initial domain 0. */
>      /* The vGIC for DOM0 is exactly emulating the hardware GIC */
>      config.gic_version = XEN_DOMCTL_CONFIG_GIC_NATIVE;
> diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
> index b18c9c2..b24e93b 100644
> --- a/xen/arch/arm/xen.lds.S
> +++ b/xen/arch/arm/xen.lds.S
> @@ -151,6 +151,15 @@ SECTIONS
>        *(.initcall1.init)
>        __initcall_end = .;
>  
> +#ifdef CONFIG_ALTERNATIVE
> +       . = ALIGN(4);
> +       __alt_instructions = .;
> +       *(.altinstructions)
> +       __alt_instructions_end = .;
> +       . = ALIGN(4);
> +       *(.altinstr_replacement)
> +#endif
> +
>        *(.init.data)
>        *(.init.data.rel)
>        *(.init.data.rel.*)
> diff --git a/xen/include/asm-arm/alternative.h b/xen/include/asm-arm/alternative.h
> new file mode 100644
> index 0000000..4287bac
> --- /dev/null
> +++ b/xen/include/asm-arm/alternative.h
> @@ -0,0 +1,165 @@
> +#ifndef __ASM_ALTERNATIVE_H
> +#define __ASM_ALTERNATIVE_H
> +
> +#include <asm/cpufeature.h>
> +#include <xen/config.h>
> +#include <xen/kconfig.h>
> +
> +#ifdef CONFIG_ALTERNATIVE
> +
> +#ifndef __ASSEMBLY__
> +
> +#include <xen/init.h>
> +#include <xen/types.h>
> +#include <xen/stringify.h>
> +
> +struct alt_instr {
> +    s32 orig_offset;    /* offset to original instruction */
> +    s32 alt_offset;     /* offset to replacement instruction */
> +    u16 cpufeature;     /* cpufeature bit set for replacement */
> +    u8  orig_len;       /* size of original instruction(s) */
> +    u8  alt_len;        /* size of new instruction(s), <= orig_len */
> +};
> +
> +void __init apply_alternatives_all(void);
> +int apply_alternatives(void *start, size_t length);
> +
> +#define ALTINSTR_ENTRY(feature)                                       \
> +    " .word 661b - .\n"                          /* label */          \
> +    " .word 663f - .\n"                          /* new instruction */\
> +    " .hword " __stringify(feature) "\n"         /* feature bit */    \
> +    " .byte 662b-661b\n"                         /* source len */     \
> +    " .byte 664f-663f\n"                         /* replacement len */
> +
> +/*
> + * alternative assembly primitive:
> + *
> + * If any of these .org directive fail, it means that insn1 and insn2
> + * don't have the same length. This used to be written as
> + *
> + * .if ((664b-663b) != (662b-661b))
> + *     .error "Alternatives instruction length mismatch"
> + * .endif
> + *
> + * but most assemblers die if insn1 or insn2 have a .inst. This should
> + * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
> + * containing commit 4e4d08cf7399b606 or c1baaddf8861).
> + */
> +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)  \
> +    ".if "__stringify(cfg_enabled)" == 1\n"                          \
> +    "661:\n\t"                                                       \
> +    oldinstr "\n"                                                    \
> +    "662:\n"                                                         \
> +    ".pushsection .altinstructions,\"a\"\n"                          \
> +    ALTINSTR_ENTRY(feature)                                          \
> +    ".popsection\n"                                                  \
> +    ".pushsection .altinstr_replacement, \"a\"\n"                    \
> +    "663:\n\t"                                                       \
> +    newinstr "\n"                                                    \
> +    "664:\n\t"                                                       \
> +    ".popsection\n\t"                                                \
> +    ".org . - (664b-663b) + (662b-661b)\n\t"                         \
> +    ".org . - (662b-661b) + (664b-663b)\n"                           \
> +    ".endif\n"
> +
> +#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
> +    __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
> +
> +#else
> +
> +#include <asm/asm_defns.h>
> +
> +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
> +    .word \orig_offset - .
> +    .word \alt_offset - .
> +    .hword \feature
> +    .byte \orig_len
> +    .byte \alt_len
> +.endm
> +
> +.macro alternative_insn insn1, insn2, cap, enable = 1
> +    .if \enable
> +661:    \insn1
> +662:    .pushsection .altinstructions, "a"
> +    altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
> +    .popsection
> +    .pushsection .altinstr_replacement, "ax"
> +663:    \insn2
> +664:    .popsection
> +    .org . - (664b-663b) + (662b-661b)
> +    .org . - (662b-661b) + (664b-663b)
> +    .endif
> +.endm
> +
> +/*
> + * Begin an alternative code sequence.
> + *
> + * The code that follows this macro will be assembled and linked as
> + * normal. There are no restrictions on this code.
> + */
> +.macro alternative_if_not cap, enable = 1
> +    .if \enable
> +    .pushsection .altinstructions, "a"
> +    altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
> +    .popsection
> +661:
> +    .endif
> +.endm
> +
> +/*
> + * Provide the alternative code sequence.
> + *
> + * The code that follows this macro is assembled into a special
> + * section to be used for dynamic patching. Code that follows this
> + * macro must:
> + *
> + * 1. Be exactly the same length (in bytes) as the default code
> + *    sequence.
> + *
> + * 2. Not contain a branch target that is used outside of the
> + *    alternative sequence it is defined in (branches into an
> + *    alternative sequence are not fixed up).
> + */
> +.macro alternative_else
> +662:    .pushsection .altinstr_replacement, "ax"
> +663:
> +.endm
> +
> +/*
> + * Complete an alternative code sequence.
> + */
> +.macro alternative_endif
> +664:    .popsection
> +    .org . - (664b-663b) + (662b-661b)
> +    .org . - (662b-661b) + (664b-663b)
> +.endm
> +
> +#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
> +    alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
> +
> +#endif /* __ASSEMBLY__ */
> +
> +/*
> + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
> + *
> + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
> + * N.B. If CONFIG_FOO is specified, but not selected, the whole block
> + *      will be omitted, including oldinstr.
> + */
> +#define ALTERNATIVE(oldinstr, newinstr, ...) \
> +    _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
> +
> +#else /* !CONFIG_ALTERNATIVE */
> +
> +static inline void apply_alternatives_all(void)
> +{
> +}
> +
> +static inline int apply_alternatives(void *start, size_t lenght)
> +{
> +    return 0;
> +}
> +
> +#endif
> +
> +#endif /* __ASM_ALTERNATIVE_H */
> diff --git a/xen/include/asm-arm/arm64/insn.h b/xen/include/asm-arm/arm64/insn.h
> index cfcdbe9..6ce37be 100644
> --- a/xen/include/asm-arm/arm64/insn.h
> +++ b/xen/include/asm-arm/arm64/insn.h
> @@ -61,6 +61,22 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  s32 aarch64_get_branch_offset(u32 insn);
>  u32 aarch64_set_branch_offset(u32 insn, s32 offset);
>  
> +/* Wrapper for common code */
> +static inline bool insn_is_branch_imm(u32 insn)
> +{
> +    return aarch64_insn_is_branch_imm(insn);
> +}
> +
> +static inline s32 insn_get_branch_offset(u32 insn)
> +{
> +    return aarch64_get_branch_offset(insn);
> +}
> +
> +static inline u32 insn_set_branch_offset(u32 insn, s32 offset)
> +{
> +    return aarch64_set_branch_offset(insn, offset);
> +}
> +
>  #endif  /* !__ARCH_ARM_ARM64_INSN */
>  /*
>   * Local variables:
> -- 
> 1.9.1
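
For readers new to the mechanism, here is a minimal usage sketch of the ALTERNATIVE() macro introduced above, under the assumption that a capability bit named ARM64_EXAMPLE_FEATURE exists; that name is a made-up placeholder, not something defined by this series. The first instruction is emitted into .text and runs by default; the replacement is stored in .altinstr_replacement and is copied over the original by apply_alternatives_all() at boot when the capability is set. Both sequences must have the same length.

    /* Hypothetical sketch -- ARM64_EXAMPLE_FEATURE is a placeholder
     * capability bit. Both instructions are 4 bytes, as required. */
    #include <asm/alternative.h>

    static inline void example_barrier(void)
    {
        /* Executes "nop" unless the capability was detected at boot,
         * in which case the patched-in "dsb sy" runs instead. */
        asm volatile ( ALTERNATIVE("nop", "dsb sy", ARM64_EXAMPLE_FEATURE) );
    }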