Re: [Xen-devel] [PATCH 1/2] vm_event: Sanitize vm_event response handling
On 13/09/16 19:12, Tamas K Lengyel wrote:
> Setting response flags in vm_event is only ever safe if the vCPUs are
> paused. To reflect this we move all checks within the if block that
> already checks whether this is the case. For checks that are only
> supported on one architecture, we relocate the bitmask operations to
> the arch-specific handlers to avoid the overhead on architectures that
> don't support them.
>
> Furthermore, we clean up the emulation checks so they more clearly
> represent the decision logic for when emulation should take place. As
> part of this we also set the stage to allow emulation in response to
> other types of events, not just mem_access violations.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>

Tamas,

Would you like a detailed review from me for this?  I'm happy to ack
the p2m bits on the basis that they're only touching mem_access code.
A full review may get stuck behind a pretty long review backlog. :-(

 -George

> ---
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> Cc: Julien Grall <julien.grall@xxxxxxx>
> ---
>  xen/arch/x86/mm/p2m.c          | 81 +++++++++++++++++++-----------------------
>  xen/arch/x86/vm_event.c        | 35 +++++++++++++++++-
>  xen/common/vm_event.c          | 53 ++++++++++++++-------------
>  xen/include/asm-arm/p2m.h      |  4 +--
>  xen/include/asm-arm/vm_event.h |  9 ++++-
>  xen/include/asm-x86/p2m.h      |  4 +--
>  xen/include/asm-x86/vm_event.h |  5 ++-
>  xen/include/xen/mem_access.h   | 12 -------
>  8 files changed, 113 insertions(+), 90 deletions(-)
>
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 7d14c3b..6c01868 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1588,62 +1588,55 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
>      }
>  }
>
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> -                                  const vm_event_response_t *rsp)
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
> +                                    const vm_event_response_t *rsp)
>  {
> -    /* Mark vcpu for skipping one instruction upon rescheduling. */
> -    if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
> -    {
> -        xenmem_access_t access;
> -        bool_t violation = 1;
> -        const struct vm_event_mem_access *data = &rsp->u.mem_access;
> +    xenmem_access_t access;
> +    bool_t violation = 1;
> +    const struct vm_event_mem_access *data = &rsp->u.mem_access;
>
> -        if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> +    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
> +    {
> +        switch ( access )
>          {
> -            switch ( access )
> -            {
> -            case XENMEM_access_n:
> -            case XENMEM_access_n2rwx:
> -            default:
> -                violation = data->flags & MEM_ACCESS_RWX;
> -                break;
> +        case XENMEM_access_n:
> +        case XENMEM_access_n2rwx:
> +        default:
> +            violation = data->flags & MEM_ACCESS_RWX;
> +            break;
>
> -            case XENMEM_access_r:
> -                violation = data->flags & MEM_ACCESS_WX;
> -                break;
> +        case XENMEM_access_r:
> +            violation = data->flags & MEM_ACCESS_WX;
> +            break;
>
> -            case XENMEM_access_w:
> -                violation = data->flags & MEM_ACCESS_RX;
> -                break;
> +        case XENMEM_access_w:
> +            violation = data->flags & MEM_ACCESS_RX;
> +            break;
>
> -            case XENMEM_access_x:
> -                violation = data->flags & MEM_ACCESS_RW;
> -                break;
> +        case XENMEM_access_x:
> +            violation = data->flags & MEM_ACCESS_RW;
> +            break;
>
> -            case XENMEM_access_rx:
> -            case XENMEM_access_rx2rw:
> -                violation = data->flags & MEM_ACCESS_W;
> -                break;
> +        case XENMEM_access_rx:
> +        case XENMEM_access_rx2rw:
> +            violation = data->flags & MEM_ACCESS_W;
> +            break;
>
> -            case XENMEM_access_wx:
> -                violation = data->flags & MEM_ACCESS_R;
> -                break;
> +        case XENMEM_access_wx:
> +            violation = data->flags & MEM_ACCESS_R;
> +            break;
>
> -            case XENMEM_access_rw:
> -                violation = data->flags & MEM_ACCESS_X;
> -                break;
> +        case XENMEM_access_rw:
> +            violation = data->flags & MEM_ACCESS_X;
> +            break;
>
> -            case XENMEM_access_rwx:
> -                violation = 0;
> -                break;
> -            }
> +        case XENMEM_access_rwx:
> +            violation = 0;
> +            break;
>          }
> -
> -        v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
> -
> -        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
> -            v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
>      }
> +
> +    return violation;
>  }
>
>  void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
> diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
> index e938ca3..343b9c8 100644
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -18,6 +18,7 @@
>   * License along with this program; If not, see <http://www.gnu.org/licenses/>.
>   */
>
> +#include <asm/p2m.h>
>  #include <asm/vm_event.h>
>
>  /* Implicitly serialized by the domctl lock. */
> @@ -56,8 +57,12 @@ void vm_event_cleanup_domain(struct domain *d)
>      d->arch.mem_access_emulate_each_rep = 0;
>  }
>
> -void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
> +void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
> +                                vm_event_response_t *rsp)
>  {
> +    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
> +        return;
> +
>      if ( !is_hvm_domain(d) )
>          return;
>
> @@ -186,6 +191,34 @@ void vm_event_fill_regs(vm_event_request_t *req)
>      req->data.regs.x86.cs_arbytes = seg.attr.bytes;
>  }
>
> +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> +    if ( !(rsp->flags & VM_EVENT_FLAG_EMULATE) )
> +    {
> +        v->arch.vm_event->emulate_flags = 0;
> +        return;
> +    }
> +
> +    switch ( rsp->reason )
> +    {
> +    case VM_EVENT_REASON_MEM_ACCESS:
> +        /*
> +         * Emulate iff this is a response to a mem_access violation and there
> +         * are still conflicting mem_access permissions in-place.
> +         */
> +        if ( p2m_mem_access_emulate_check(v, rsp) )
> +        {
> +            if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
> +                v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
> +
> +            v->arch.vm_event->emulate_flags = rsp->flags;
> +        }
> +        break;
> +    default:
> +        break;
> +    };
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 8398af7..907ab40 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -398,42 +398,41 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>           * In some cases the response type needs extra handling, so here
>           * we call the appropriate handlers.
>           */
> -        switch ( rsp.reason )
> -        {
> -#ifdef CONFIG_X86
> -        case VM_EVENT_REASON_MOV_TO_MSR:
> -#endif
> -        case VM_EVENT_REASON_WRITE_CTRLREG:
> -            vm_event_register_write_resume(v, &rsp);
> -            break;
> -
> -#ifdef CONFIG_HAS_MEM_ACCESS
> -        case VM_EVENT_REASON_MEM_ACCESS:
> -            mem_access_resume(v, &rsp);
> -            break;
> -#endif
>
> +        /* Check flags which apply only when the vCPU is paused */
> +        if ( atomic_read(&v->vm_event_pause_count) )
> +        {
>  #ifdef CONFIG_HAS_MEM_PAGING
> -        case VM_EVENT_REASON_MEM_PAGING:
> -            p2m_mem_paging_resume(d, &rsp);
> -            break;
> +            if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
> +                p2m_mem_paging_resume(d, &rsp);
>  #endif
>
> -        };
> +            /*
> +             * Check emulation flags in the arch-specific handler only, as it
> +             * has to set arch-specific flags when supported, and to avoid
> +             * bitmask overhead when it isn't supported.
> +             */
> +            vm_event_emulate_check(v, &rsp);
> +
> +            /*
> +             * Check in arch-specific handler to avoid bitmask overhead when
> +             * not supported.
> +             */
> +            vm_event_register_write_resume(v, &rsp);
>
> -        /* Check for altp2m switch */
> -        if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
> -            p2m_altp2m_check(v, rsp.altp2m_idx);
> +            /*
> +             * Check in arch-specific handler to avoid bitmask overhead when
> +             * not supported.
> +             */
> +            vm_event_toggle_singlestep(d, v, &rsp);
> +
> +            /* Check for altp2m switch */
> +            if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
> +                p2m_altp2m_check(v, rsp.altp2m_idx);
>
> -        /* Check flags which apply only when the vCPU is paused */
> -        if ( atomic_read(&v->vm_event_pause_count) )
> -        {
>              if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
>                  vm_event_set_registers(v, &rsp);
>
> -            if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
> -                vm_event_toggle_singlestep(d, v);
> -
>              if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
>                  vm_event_vcpu_unpause(v);
>          }
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 53c4d78..5e9bc54 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -121,10 +121,10 @@ typedef enum {
>                               p2m_to_mask(p2m_map_foreign)))
>
>  static inline
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
>                                    const vm_event_response_t *rsp)
>  {
> -    /* Not supported on ARM. */
> +    return false;
>  }
>
>  static inline
> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
> index 9482636..66f2474 100644
> --- a/xen/include/asm-arm/vm_event.h
> +++ b/xen/include/asm-arm/vm_event.h
> @@ -34,7 +34,8 @@ static inline void vm_event_cleanup_domain(struct domain *d)
>      memset(&d->monitor, 0, sizeof(d->monitor));
>  }
>
> -static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
> +static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
> +                                              vm_event_response_t *rsp)
>  {
>      /* Not supported on ARM. */
>  }
> @@ -45,4 +46,10 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
>      /* Not supported on ARM. */
>  }
>
> +static inline
> +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> +    /* Not supported on ARM. */
> +}
> +
>  #endif /* __ASM_ARM_VM_EVENT_H__ */
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 9fc9ead..1897def 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -677,8 +677,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>
>  /* Check for emulation and mark vcpu for skipping one instruction
>   * upon rescheduling if required. */
> -void p2m_mem_access_emulate_check(struct vcpu *v,
> -                                  const vm_event_response_t *rsp);
> +bool_t p2m_mem_access_emulate_check(struct vcpu *v,
> +                                    const vm_event_response_t *rsp);
>
>  /* Sanity check for mem_access hardware support */
>  static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
> diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
> index 294def6..ebb5d88 100644
> --- a/xen/include/asm-x86/vm_event.h
> +++ b/xen/include/asm-x86/vm_event.h
> @@ -35,8 +35,11 @@ int vm_event_init_domain(struct domain *d);
>
>  void vm_event_cleanup_domain(struct domain *d);
>
> -void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
> +void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
> +                                vm_event_response_t *rsp);
>
>  void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
>
> +void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
> +
>  #endif /* __ASM_X86_VM_EVENT_H__ */
> diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
> index 3d054e0..da36e07 100644
> --- a/xen/include/xen/mem_access.h
> +++ b/xen/include/xen/mem_access.h
> @@ -30,12 +30,6 @@
>  int mem_access_memop(unsigned long cmd,
>                       XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
>
> -static inline
> -void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
> -{
> -    p2m_mem_access_emulate_check(v, rsp);
> -}
> -
>  #else
>
>  static inline
> @@ -45,12 +39,6 @@ int mem_access_memop(unsigned long cmd,
>      return -ENOSYS;
>  }
>
> -static inline
> -void mem_access_resume(struct vcpu *vcpu, vm_event_response_t *rsp)
> -{
> -    /* Nothing to do. */
> -}
> -
>  #endif /* HAS_MEM_ACCESS */
>
>  #endif /* _XEN_ASM_MEM_ACCESS_H */
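
The consumer-visible contract after this change: response flags with side
effects (EMULATE, SET_EMUL_READ_DATA, SET_REGISTERS, TOGGLE_SINGLESTEP,
ALTERNATE_P2M) are only honoured while the vCPU is still paused for the
event being answered. A minimal responder sketch in C, loosely modelled on
the in-tree xen-access test tool; put_response() and notify_monitor_ring()
are hypothetical stand-ins for the real vm_event ring and event-channel
plumbing:

    #include <string.h>
    #include <xenctrl.h>        /* defines __XEN_TOOLS__ for the header below */
    #include <xen/vm_event.h>   /* public vm_event interface */

    /* Hypothetical helpers standing in for the ring-buffer plumbing. */
    extern void put_response(vm_event_response_t *rsp);
    extern void notify_monitor_ring(void);

    static void respond_and_emulate(const vm_event_request_t *req)
    {
        vm_event_response_t rsp;

        memset(&rsp, 0, sizeof(rsp));
        rsp.version = VM_EVENT_INTERFACE_VERSION;
        rsp.vcpu_id = req->vcpu_id;
        /*
         * Echo the reason: vm_event_emulate_check() switches on rsp->reason,
         * so EMULATE only triggers for a MEM_ACCESS response.
         */
        rsp.reason  = req->reason;

        /*
         * Echo VM_EVENT_FLAG_VCPU_PAUSED so vm_event_resume() unpauses the
         * vCPU.  With this patch, VM_EVENT_FLAG_EMULATE (like the other
         * side-effect flags) is only acted on under the pause-count check,
         * so setting it for a vCPU that was never paused does nothing.
         */
        rsp.flags  = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;
        rsp.flags |= VM_EVENT_FLAG_EMULATE;

        put_response(&rsp);
        notify_monitor_ring();
    }

The hypervisor half of that contract is exactly the reordering under
review: vm_event_resume() now runs the paging, emulation, register-write,
singlestep and altp2m handlers only inside the
atomic_read(&v->vm_event_pause_count) block.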