[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH V3 5/6] x86/xsaves: support compact format for hvm save/restore
On 05/08/15 02:57, Shuai Ruan wrote: > xsaves/xrstors only use compact format, so format conversion > is needed when performing save/restore. > > Signed-off-by: Shuai Ruan <shuai.ruan@xxxxxxxxxxxxxxx> > --- > xen/arch/x86/domain.c | 3 + > xen/arch/x86/hvm/hvm.c | 16 +++-- > xen/arch/x86/xstate.c | 138 > +++++++++++++++++++++++++++++++++++++++++++ > xen/include/asm-x86/xstate.h | 6 ++ > 4 files changed, 158 insertions(+), 5 deletions(-) > > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c > index e8b8d67..083b70d 100644 > --- a/xen/arch/x86/domain.c > +++ b/xen/arch/x86/domain.c > @@ -845,6 +845,9 @@ int arch_set_info_guest( > memcpy(v->arch.fpu_ctxt, &c.nat->fpu_ctxt, sizeof(c.nat->fpu_ctxt)); > if ( v->arch.xsave_area ) > v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE; > + if ( cpu_has_xsaves ) > + v->arch.xsave_area->xsave_hdr.xcomp_bv = v->arch.xcr0_accum | > + > XSTATE_COMPACTION_ENABLED; > } > > if ( !compat ) > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c > index e5cf761..8495938 100644 > --- a/xen/arch/x86/hvm/hvm.c > +++ b/xen/arch/x86/hvm/hvm.c > @@ -2127,8 +2127,11 @@ static int hvm_save_cpu_xsave_states(struct domain *d, > hvm_domain_context_t *h) > ctxt->xfeature_mask = xfeature_mask; > ctxt->xcr0 = v->arch.xcr0; > ctxt->xcr0_accum = v->arch.xcr0_accum; > - memcpy(&ctxt->save_area, v->arch.xsave_area, > - size - offsetof(struct hvm_hw_cpu_xsave, save_area)); > + if ( cpu_has_xsaves ) > + save_xsave_states(v, (u8 *)&ctxt->save_area); This absolutely needs to take a size parameter, and looks like it should take a void pointer. 
> + else > + memcpy(&ctxt->save_area, v->arch.xsave_area, > + size - offsetof(struct hvm_hw_cpu_xsave, save_area)); > } > > return 0; > @@ -2227,9 +2230,12 @@ static int hvm_load_cpu_xsave_states(struct domain *d, > hvm_domain_context_t *h) > v->arch.xcr0_accum = ctxt->xcr0_accum; > if ( ctxt->xcr0_accum & XSTATE_NONLAZY ) > v->arch.nonlazy_xstate_used = 1; > - memcpy(v->arch.xsave_area, &ctxt->save_area, > - min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave, > - save_area)); > + if ( cpu_has_xsaves ) > + load_xsave_states(v, (u8 *)&ctxt->save_area); > + else > + memcpy(v->arch.xsave_area, &ctxt->save_area, > + min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave, > + save_area)); > > return 0; > } > diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c > index 699058d..0eea146 100644 > --- a/xen/arch/x86/xstate.c > +++ b/xen/arch/x86/xstate.c > @@ -29,6 +29,9 @@ static u32 __read_mostly xsave_cntxt_size; > /* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */ > u64 __read_mostly xfeature_mask; > > +static unsigned int *xstate_offsets, *xstate_sizes; > +static unsigned int xstate_features; > +static unsigned int xstate_comp_offsets[sizeof(xfeature_mask)*8]; > /* Cached xcr0 for fast read */ > static DEFINE_PER_CPU(uint64_t, xcr0); > > @@ -65,6 +68,135 @@ uint64_t get_xcr0(void) > return this_cpu(xcr0); > } > > +static void setup_xstate_features(void) > +{ > + unsigned int eax, ebx, ecx, edx, leaf = 0x2; > + > + xstate_features = fls(xfeature_mask); > + xstate_offsets = _xzalloc(xstate_features, sizeof(int)); > + xstate_sizes = _xzalloc(xstate_features, sizeof(int)); Don't mix and match types. xzalloc_array() is what you should use. 
> + > + do { > + cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx); > + > + if ( eax == 0 ) > + break; > + > + xstate_offsets[leaf] = ebx; > + xstate_sizes[leaf] = eax; > + > + leaf++; > + } while (1); This is erroneous if there is a break in the feature bits, and liable to wander off the end of the array. This loop should be a for loop over set bits in xfeature_mask, not an infinite while loop. > +} > + > +static void setup_xstate_comp(void) > +{ > + unsigned int xstate_comp_sizes[sizeof(xfeature_mask)*8]; > + int i; unsigned int. > + > + /* > + * The FP xstates and SSE xstates are legacy states. They are always > + * in the fixed offsets in the xsave area in either compacted form > + * or standard form. > + */ > + xstate_comp_offsets[0] = 0; > + xstate_comp_offsets[1] = XSAVE_SSE_OFFSET; > + > + xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE; > + > + for (i = 2; i < xstate_features; i++) This loop will run off the end of xstate_comp_sizes[] for any processor supporting AVX512 or greater. > + { > + if ( 1 << i & xfeature_mask ) You definitely need some brackets here. > + xstate_comp_sizes[i] = xstate_sizes[i]; > + else > + xstate_comp_sizes[i] = 0; > + > + if ( i > 2 ) > + xstate_comp_offsets[i] = xstate_comp_offsets[i-1] > + + xstate_comp_sizes[i-1]; > + } > +} > + > +static void *get_xsave_addr(struct xsave_struct *xsave, int xstate) > +{ > + int feature = fls(xstate) - 1; > + if ( !(1 << feature & xfeature_mask) ) > + return NULL; > + > + return (void *)xsave + xstate_comp_offsets[feature]; > +} > + > +void save_xsave_states(struct vcpu *v, u8 *dest) > +{ > + struct xsave_struct *xsave = v->arch.xsave_area; > + u64 xstate_bv = xsave->xsave_hdr.xstate_bv; > + u64 valid; > + > + /* > + * Copy legacy XSAVE area, to avoid complications with CPUID > + * leaves 0 and 1 in the loop below. 
> + */ > + memcpy(dest, xsave, XSAVE_HDR_OFFSET); > + > + /* Set XSTATE_BV */ > + *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; > + > + /* > + * Copy each region from the possibly compacted offset to the > + * non-compacted offset. > + */ > + valid = xstate_bv & ~XSTATE_FP_SSE; > + while ( valid ) > + { > + u64 feature = valid & -valid; > + int index = fls(feature) - 1; > + void *src = get_xsave_addr(xsave, feature); > + > + if ( src ) > + memcpy(dest + xstate_offsets[index], src, xstate_sizes[index]); > + else > + WARN_ON(1); These WARN_ON()s are of no use whatsoever. They should either be dropped, or turned to BUG() after printing some emergency state. > + > + valid -= feature; > + } > +} > + > +void load_xsave_states(struct vcpu *v, u8 *src) > +{ > + struct xsave_struct *xsave = v->arch.xsave_area; > + u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); > + u64 valid; > + > + /* > + * Copy legacy XSAVE area, to avoid complications with CPUID > + * leaves 0 and 1 in the loop below. > + */ > + memcpy(xsave, src, XSAVE_HDR_OFFSET); > + > + /* Set XSTATE_BV and possibly XCOMP_BV. */ > + xsave->xsave_hdr.xstate_bv = xstate_bv; > + xsave->xsave_hdr.xcomp_bv = v->arch.xcr0_accum | > XSTATE_COMPACTION_ENABLED; > + > + /* > + * Copy each region from the non-compacted offset to the > + * possibly compacted offset. > + */ > + valid = xstate_bv & ~XSTATE_FP_SSE; > + while ( valid ) > + { > + u64 feature = valid & -valid; > + int index = fls(feature) - 1; > + void *dest = get_xsave_addr(xsave, feature); > + > + if (dest) > + memcpy(dest, src + xstate_offsets[index], xstate_sizes[index]); > + else > + WARN_ON(1); Tabs. ~Andrew _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.