On 01/06/2011 14:20, "Yang, Wei Y" <wei.y.yang@xxxxxxxxx> wrote:
>
> Intel's new CPUs support SMEP (Supervisor Mode Execution Protection). SMEP
> prevents
> the kernel from executing code in user-mode pages. The updated Intel SDM
> describes this CPU feature.
> The document will be published soon.
Then I can hardly be expected to review this patch. I have no idea what it
does!
-- Keir
> This patch enables SMEP in Xen to protect the Xen hypervisor from executing
> pv guest
> code, and kills any pv guest that triggers an SMEP fault.
>
> Signed-off-by: Yang, Wei <wei.y.yang@xxxxxxxxx>
> Signed-off-by: Shan, Haitao <haitao.shan@xxxxxxxxx>
> Signed-off-by: Li, Xin <xin.li@xxxxxxxxx>
>
> ---
> arch/x86/cpu/common.c | 16 ++++++++++++++++
> arch/x86/traps.c | 43
> +++++++++++++++++++++++++++++++++++++++++--
> include/asm-x86/cpufeature.h | 5 ++++-
> include/asm-x86/processor.h | 1 +
> 4 files changed, 62 insertions(+), 3 deletions(-)
>
> diff -r d4f6310f1ef5 xen/arch/x86/cpu/common.c
> --- a/xen/arch/x86/cpu/common.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/cpu/common.c Wed Jun 01 19:53:52 2011 +0800
> @@ -28,6 +28,9 @@
> integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
> unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
> integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
> +/* nosmep: if true, Intel SMEP is disabled. */
> +static bool_t __initdata disable_smep;
> +boolean_param("nosmep", disable_smep);
>
> struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
>
> @@ -222,6 +225,17 @@
> c->x86_capability[4] = cap4;
> }
>
> +static void __cpuinit setup_smep(struct cpuinfo_x86 *c)
> +{
> + if ( cpu_has(c, X86_FEATURE_SMEP) ) {
> + if( unlikely(disable_smep) ) {
> + setup_clear_cpu_cap(X86_FEATURE_SMEP);
> + clear_in_cr4(X86_CR4_SMEP);
> + } else
> + set_in_cr4(X86_CR4_SMEP);
> + }
> +}
> +
> void __cpuinit generic_identify(struct cpuinfo_x86 * c)
> {
> u32 tfms, xlvl, capability, excap, ebx;
> @@ -268,6 +282,8 @@
> c->x86_capability[X86_FEATURE_FSGSBASE / 32] = ebx;
> }
>
> + setup_smep(c);
> +
> early_intel_workaround(c);
>
> #ifdef CONFIG_X86_HT
> diff -r d4f6310f1ef5 xen/arch/x86/traps.c
> --- a/xen/arch/x86/traps.c Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/arch/x86/traps.c Wed Jun 01 19:53:52 2011 +0800
> @@ -1195,8 +1195,16 @@
> if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
> (l3e_get_flags(l3e) & disallowed_flags) )
> return 0;
> - if ( l3e_get_flags(l3e) & _PAGE_PSE )
> + if ( l3e_get_flags(l3e) & _PAGE_PSE ) {
> + /* SMEP fault error code 10001b */
> + if ( (error_code & PFEC_insn_fetch) &&
> + !(error_code & PFEC_user_mode) &&
> + cpu_has_smep &&
> + (_PAGE_USER & l4e_get_flags(l4e) & l3e_get_flags(l3e)) )
> + return 2;
> +
> return 1;
> + }
> #endif
> #endif
>
> @@ -1207,8 +1215,21 @@
> if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
> (l2e_get_flags(l2e) & disallowed_flags) )
> return 0;
> - if ( l2e_get_flags(l2e) & _PAGE_PSE )
> + if ( l2e_get_flags(l2e) & _PAGE_PSE ) {
> + /* SMEP fault error code 10001b */
> + if ( (error_code & PFEC_insn_fetch) &&
> + !(error_code & PFEC_user_mode) &&
> + cpu_has_smep &&
> + (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> + l4e_get_flags(l4e) &
> + l3e_get_flags(l3e) &
> +#endif
> + l2e_get_flags(l2e)) )
> + return 2;
> +
> return 1;
> + }
>
> l1t = map_domain_page(mfn);
> l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
> @@ -1218,6 +1239,18 @@
> (l1e_get_flags(l1e) & disallowed_flags) )
> return 0;
>
> + /* SMEP fault error code 10001b */
> + if ( (error_code & PFEC_insn_fetch) &&
> + !(error_code & PFEC_user_mode) &&
> + cpu_has_smep &&
> + (_PAGE_USER &
> +#if CONFIG_PAGING_LEVELS >= 4
> + l4e_get_flags(l4e) &
> + l3e_get_flags(l3e) &
> +#endif
> + l2e_get_flags(l2e) & l1e_get_flags(l1e)) )
> + return 2;
> +
> return 1;
> }
>
> @@ -1235,6 +1268,12 @@
> is_spurious = __spurious_page_fault(addr, error_code);
> local_irq_restore(flags);
>
> + if ( is_spurious == 2 ) {
> + printk("SMEP fault at address %lx, crashing current domain %d\n",
> + addr, current->domain->domain_id);
> + domain_crash_synchronous();
> + }
> +
> return is_spurious;
> }
>
> diff -r d4f6310f1ef5 xen/include/asm-x86/cpufeature.h
> --- a/xen/include/asm-x86/cpufeature.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/cpufeature.h Wed Jun 01 19:53:52 2011 +0800
> @@ -141,8 +141,9 @@
> #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
> #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs
> */
>
> -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
> +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
> #define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
> +#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection */
>
> #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
> #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
> @@ -201,6 +202,8 @@
> #define cpu_has_fsgsbase boot_cpu_has(X86_FEATURE_FSGSBASE)
> #endif
>
> +#define cpu_has_smep boot_cpu_has(X86_FEATURE_SMEP)
> +
> #define cpu_has_ffxsr ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
> \
> && boot_cpu_has(X86_FEATURE_FFXSR))
>
> diff -r d4f6310f1ef5 xen/include/asm-x86/processor.h
> --- a/xen/include/asm-x86/processor.h Wed Jun 01 11:11:43 2011 +0100
> +++ b/xen/include/asm-x86/processor.h Wed Jun 01 19:53:52 2011 +0800
> @@ -85,6 +85,7 @@
> #define X86_CR4_SMXE 0x4000 /* enable SMX */
> #define X86_CR4_FSGSBASE 0x10000 /* enable {rd,wr}{fs,gs}base */
> #define X86_CR4_OSXSAVE 0x40000 /* enable XSAVE/XRSTOR */
> +#define X86_CR4_SMEP 0x100000/* enable SMEP */
>
> /*
> * Trap/fault mnemonics.
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx
> http://lists.xensource.com/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|