Re: [PATCH 04/17] x86/PV: harden guest memory accesses against speculative abuse
On Thu, Jan 14, 2021 at 04:04:57PM +0100, Jan Beulich wrote:
> Inspired by
> https://lore.kernel.org/lkml/f12e7d3cecf41b2c29734ea45a393be21d4a8058.1597848273.git.jpoimboe@xxxxxxxxxx/
> and prior work in that area of x86 Linux, suppress speculation with
> guest specified pointer values by suitably masking the addresses to
> non-canonical space in case they fall into Xen's virtual address range.
>
> Introduce a new Kconfig control.
>
> Note that it is necessary in such code to avoid using "m" kind operands:
> If we didn't, there would be no guarantee that the register passed to
> guest_access_mask_ptr is also the (base) one used for the memory access.
>
> As a minor unrelated change in get_unsafe_asm() the unnecessary "itype"
> parameter gets dropped and the XOR on the fixup path gets changed to be
> a 32-bit one in all cases: This way we avoid pointless REX.W or operand
> size overrides, or writes to partial registers.
>
> Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> The insn sequence chosen is certainly up for discussion; I've picked
> this one despite the RCR because alternatives I could come up with,
> like
>
>     mov   $(HYPERVISOR_VIRT_END), %rax
>     mov   $~0, %rdx
>     mov   $0x7fffffffffffffff, %rcx
>     cmp   %rax, %rdi
>     cmovb %rcx, %rdx
>     and   %rdx, %rdi
>
> weren't necessarily better: Either, as above, they are longer and
> require a 3rd scratch register, or they also utilize the carry flag in
> some similar way.
> ---
> Judging from the comment ahead of put_unsafe_asm() we might as well not
> tell gcc at all anymore about the memory access there, now that there's
> no use of the operand anymore in the assembly code.
>
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -10,12 +10,19 @@
>  #include <xen/sched.h>
>  #include <asm/uaccess.h>
>
> -unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> +#ifndef GUARD
> +# define GUARD UA_KEEP
> +#endif
> +
> +unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
>  {
>      unsigned dummy;
>
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -42,6 +49,7 @@ unsigned __copy_to_user_ll(void __user *
>          _ASM_EXTABLE(1b, 2b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -49,12 +57,15 @@ unsigned __copy_to_user_ll(void __user *
>      return n;
>  }
>
> -unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> +unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
>  {
>      unsigned dummy;
>
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -87,6 +98,7 @@ unsigned __copy_from_user_ll(void *to, c
>          _ASM_EXTABLE(1b, 6b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -94,6 +106,8 @@ unsigned __copy_from_user_ll(void *to, c
>      return n;
>  }
>
> +#if GUARD(1) + 0

Why do you need the '+ 0' here? I guess it's to prevent the
preprocessor from complaining when GUARD(1) gets replaced by nothing?
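In case it helps, here is a standalone sketch of the '+ 0' idiom being asked
about. The UA_KEEP()/UA_DROP() definitions below are assumptions that mirror
what the patch appears to rely on (keep the arguments vs. drop them); they are
not copied from Xen's headers.

    /* "Keep" variant: GUARD(1) + 0 expands to "1 + 0". */
    #define UA_KEEP(...) __VA_ARGS__
    /* "Drop" variant: GUARD(1) expands to nothing, and the trailing "+ 0"
     * keeps the #if expression well formed (it becomes just "+ 0"). */
    #define UA_DROP(...)

    #define GUARD UA_KEEP
    #if GUARD(1) + 0            /* "1 + 0" -> true: this pass is compiled */
    int guest_access_pass;
    #endif

    #undef GUARD
    #define GUARD UA_DROP
    #if GUARD(1) + 0            /* "+ 0" -> false: this pass is skipped */
    int not_compiled;
    #endif

Without the '+ 0', the second #if would have an empty controlling expression
and fail to preprocess, which matches the guess above.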
> +
>  /**
>   * copy_to_user: - Copy a block of data into user space.
>   * @to:   Destination address, in user space.
> @@ -128,8 +142,11 @@ unsigned clear_user(void __user *to, uns
>  {
>      if ( access_ok(to, n) )
>      {
> +        long dummy;
> +
>          stac();
>          asm volatile (
> +            "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
>              "0:  rep stos"__OS"\n"
>              "    mov  %[bytes], %[cnt]\n"
>              "1:  rep stosb\n"
>              "2:\n"
>              ".section .fixup,\"ax\"\n"
>              "3:  lea (%q[bytes],%q[longs],"STR(BYTES_PER_LONG)"),%[cnt]\n"
>              "    jmp 2b\n"
>              ".previous\n"
>              _ASM_EXTABLE(0b,3b)
>              _ASM_EXTABLE(1b,2b)
> -            : [cnt] "=&c" (n), [to] "+D" (to)
> +            : [cnt] "=&c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
> +              [scratch2] "=&r" (dummy)
>              : [bytes] "r" (n & (BYTES_PER_LONG - 1)),
>                [longs] "0" (n / BYTES_PER_LONG), "a" (0) );
>          clac();
> @@ -174,6 +192,16 @@ unsigned copy_from_user(void *to, const
>      return n;
>  }
>
> +# undef GUARD
> +# define GUARD UA_DROP
> +# define copy_to_guest_ll copy_to_unsafe_ll
> +# define copy_from_guest_ll copy_from_unsafe_ll
> +# undef __user
> +# define __user
> +# include __FILE__
> +
> +#endif /* GUARD(1) */
> +
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -446,6 +446,8 @@ UNLIKELY_START(g, create_bounce_frame_ba
>          jmp   asm_domain_crash_synchronous /* Does not return */
>  __UNLIKELY_END(create_bounce_frame_bad_sp)
>
> +        guest_access_mask_ptr %rsi, %rax, %rcx
> +
>  #define STORE_GUEST_STACK(reg, n) \
>  0:      movq  %reg,(n)*8(%rsi); \
>          _ASM_EXTABLE(0b, domain_crash_page_fault_ ## n ## x8)
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -114,6 +114,24 @@ config SPECULATIVE_HARDEN_BRANCH
>
>  	  If unsure, say Y.
>
> +config SPECULATIVE_HARDEN_GUEST_ACCESS
> +	bool "Speculative PV Guest Memory Access Hardening"
> +	default y
> +	depends on PV
> +	help
> +	  Contemporary processors may use speculative execution as a
> +	  performance optimisation, but this can potentially be abused by an
> +	  attacker to leak data via speculative sidechannels.
> +
> +	  One source of data leakage is via speculative accesses to hypervisor
> +	  memory through guest controlled values used to access guest memory.
> +
> +	  When enabled, code paths accessing PV guest memory will have guest
> +	  controlled addresses massaged such that memory accesses through them
> +	  won't touch hypervisor address space.
> +
> +	  If unsure, say Y.
> +
>  endmenu
>
>  config HYPFS
> --- a/xen/include/asm-x86/asm-defns.h
> +++ b/xen/include/asm-x86/asm-defns.h
> @@ -44,3 +44,16 @@
>  .macro INDIRECT_JMP arg:req
>      INDIRECT_BRANCH jmp \arg
>  .endm
> +
> +.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req
> +#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS)
> +    mov $(HYPERVISOR_VIRT_END - 1), \scratch1
> +    mov $~0, \scratch2
> +    cmp \ptr, \scratch1
> +    rcr $1, \scratch2
> +    and \scratch2, \ptr

If my understanding is correct, that's equivalent to:

ptr &= ~0ull >> (ptr < HYPERVISOR_VIRT_END);

It might be helpful to add this as a comment, to clarify the intended
functionality of the assembly bit.

I wonder if the C code above can generate any jumps? As you pointed
out, we already use something similar in array_index_mask_nospec and
that's fine to do in C.

Thanks, Roger.
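For reference, a minimal self-contained C model of the masking logic discussed
above, built around the equivalence given in the review. The HYPERVISOR_VIRT_END
value and the function name below are illustrative stand-ins only (the real
constant comes from Xen's headers), and the hypervisor performs this in
assembly rather than C.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in; not the authoritative value from Xen's headers. */
    #define HYPERVISOR_VIRT_END_ILLUSTRATIVE 0xffff880000000000ULL

    /*
     * Models the "cmp; rcr; and" sequence: addresses below HYPERVISOR_VIRT_END
     * get bit 63 cleared (so a Xen-range address becomes non-canonical),
     * while addresses at or above it are left untouched.
     */
    static uint64_t guest_access_mask_ptr_model(uint64_t ptr)
    {
        uint64_t mask = ~0ULL >> (ptr < HYPERVISOR_VIRT_END_ILLUSTRATIVE);

        return ptr & mask;
    }

    int main(void)
    {
        /* Ordinary lower-half guest address: bit 63 is already clear, unchanged. */
        printf("%#llx\n",
               (unsigned long long)guest_access_mask_ptr_model(0x00007f0000001000ULL));
        /* Address inside Xen's range: bit 63 cleared, result is non-canonical,
         * so a speculative access through it cannot reach hypervisor memory. */
        printf("%#llx\n",
               (unsigned long long)guest_access_mask_ptr_model(0xffff830000001000ULL));
        return 0;
    }

On the question about jumps: a simple relational expression like this is
typically compiled to a flag-setting instruction (setb or similar) rather than
a branch, giving the same branch-free behaviour the cmp/rcr pairing provides
directly, though that is a property of the compiler rather than a guarantee.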