Re: [Xen-devel] [PATCH 3/4] VMX: Add posted interrupt supporting
On 09/04/2013 07:01, "Yang Zhang" <yang.z.zhang@xxxxxxxxx> wrote:
> From: Yang Zhang <yang.z.zhang@xxxxxxxxx>
>
> Add support for using posted interrupts to deliver interrupts.
>
> Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
> Reviewed-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> ---
...
> +static void __vmx_deliver_posted_interrupt(struct vcpu *v)
> +{
> +    bool_t running;
> +
> +    running = v->is_running;
> +    vcpu_unblock(v);
> +    if ( running && (in_irq() || (v != current)) )
> +    {
> +        unsigned int cpu = v->processor;
> +
> +        if ( !test_and_set_bit(VCPU_KICK_SOFTIRQ, &softirq_pending(cpu))
> +             && (cpu != smp_processor_id()) )
> +            send_IPI_mask(cpumask_of(cpu), POSTED_INTERRUPT_VECTOR);
I don't think you need to tickle VCPU_KICK_SOFTIRQ here. You aren't
synchronising with vmx_intr_assist(), only notifying the target processor
of pending virtual interrupts, and all that requires is an IPI of
POSTED_INTERRUPT_VECTOR in some cases.
I suggest:

    if ( running )
    {
        unsigned int cpu = v->processor;

        if ( cpu != smp_processor_id() )
            send_IPI_mask(...);
    }
That would just work, right? It also avoids duplicating some of the
trickier logic in vcpu_kick().
A code comment to say that this is a simplified form of vcpu_kick() is
probably worthwhile too. There are useful code comments in vcpu_kick()
regarding why things are ordered as they are.
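To make that concrete, here is a rough, untested sketch of what the
simplified helper and its comment might look like, using only the names
already present in the patch above:

    /*
     * Simplified form of vcpu_kick(): all we need to do is prod a remote
     * processor that is currently running this vCPU with an IPI of
     * POSTED_INTERRUPT_VECTOR, so that it notices the newly set PIR bits.
     * See the comments in vcpu_kick() for why is_running is sampled
     * before the vcpu_unblock().
     */
    static void __vmx_deliver_posted_interrupt(struct vcpu *v)
    {
        bool_t running = v->is_running;

        vcpu_unblock(v);
        if ( running )
        {
            unsigned int cpu = v->processor;

            if ( cpu != smp_processor_id() )
                send_IPI_mask(cpumask_of(cpu), POSTED_INTERRUPT_VECTOR);
        }
    }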
-- Keir
> +    }
> +}
> +
> +static void vmx_deliver_posted_intr(struct vcpu *v, u8 vector, u8 trig_mode)
> +{
> +    vlapic_set_tmr(vcpu_vlapic(v), vector, trig_mode);
> +
> +    vmx_update_eoi_exit_bitmap(v, vector, trig_mode);
> +
> +    if ( pi_test_and_set_pir(vector, &v->arch.hvm_vmx.pi_desc) )
> +        return;
> +
> +    if ( unlikely(v->arch.hvm_vmx.eoi_exitmap_changed) )
> +    {
> +        /*
> +         * If the EOI exit bitmap needs to be changed, or the notification
> +         * vector can't be allocated, the interrupt will not be injected
> +         * until VM entry, as it used to be.
> +         */
> +        pi_test_and_set_on(&v->arch.hvm_vmx.pi_desc);
> +        goto out;
> +    }
> +
> +    if ( !pi_test_and_set_on(&v->arch.hvm_vmx.pi_desc) )
> +    {
> +        __vmx_deliver_posted_interrupt(v);
> +        return;
> +    }
> +
> +out:
> +    vcpu_kick(v);
> +}
> +
> +static void vmx_sync_pir_to_irr(struct vcpu *v)
> +{
> +    struct vlapic *vlapic = vcpu_vlapic(v);
> +    u32 val;
> +    int offset, group;
> +
> +    if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) )
> +        return;
> +
> +    for ( group = 0; group < 8; group++ )
> +    {
> +        val = pi_get_pir(&v->arch.hvm_vmx.pi_desc, group);
> +        offset = APIC_IRR + 0x10 * group;
> +        *((uint32_t *)(&vlapic->regs->data[offset])) |= val;
> +    }
> +}
> +
> static struct hvm_function_table __read_mostly vmx_function_table = {
>     .name = "VMX",
>     .cpu_up_prepare = vmx_cpu_up_prepare,
> @@ -1497,6 +1560,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
>     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
>     .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
>     .process_isr = vmx_process_isr,
> +    .deliver_posted_intr = vmx_deliver_posted_intr,
> +    .sync_pir_to_irr = vmx_sync_pir_to_irr,
>     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
> };
>
> @@ -1534,6 +1599,11 @@ struct hvm_function_table * __init start_vmx(void)
>     if ( cpu_has_vmx_posted_intr_processing )
>         set_direct_apic_vector(POSTED_INTERRUPT_VECTOR,
>                                posted_interrupt_handler);
> +    else
> +    {
> +        hvm_funcs.deliver_posted_intr = NULL;
> +        hvm_funcs.sync_pir_to_irr = NULL;
> +    }
>
>     setup_vmcs_dump();
>
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 2fa2ea5..493cffa 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -184,6 +184,8 @@ struct hvm_function_table {
>     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
>     int (*virtual_intr_delivery_enabled)(void);
>     void (*process_isr)(int isr, struct vcpu *v);
> +    void (*deliver_posted_intr)(struct vcpu *v, u8 vector, u8 trig_mode);
> +    void (*sync_pir_to_irr)(struct vcpu *v);
>
>     /*Walk nested p2m */
>     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
> diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
> index 101ef57..b212134 100644
> --- a/xen/include/asm-x86/hvm/vlapic.h
> +++ b/xen/include/asm-x86/hvm/vlapic.h
> @@ -104,6 +104,7 @@ void vlapic_handle_EOI_induced_exit(struct vlapic *vlapic, int vector);
> void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high);
>
> int vlapic_apicv_write(struct vcpu *v, unsigned int offset);
> +void vlapic_set_tmr(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
>
> struct vlapic *vlapic_lowest_prio(
>     struct domain *d, struct vlapic *source,
> diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
> index d4d6feb..ed814ce 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmx.h
> @@ -99,6 +99,28 @@ void vmx_update_exception_bitmap(struct vcpu *v);
> void vmx_update_cpu_exec_control(struct vcpu *v);
> void vmx_update_secondary_exec_control(struct vcpu *v);
>
> +#define POSTED_INTR_ON 0
> +static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
> +{
> +    return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
> +}
> +
> +static inline int pi_test_and_set_on(struct pi_desc *pi_desc)
> +{
> +    return test_and_set_bit(POSTED_INTR_ON,
> +                            (unsigned long *)&pi_desc->control);
> +}
> +
> +static inline int pi_test_and_clear_on(struct pi_desc *pi_desc)
> +{
> +    return test_and_clear_bit(POSTED_INTR_ON,
> +                              (unsigned long *)&pi_desc->control);
> +}
> +
> +static inline u32 pi_get_pir(struct pi_desc *pi_desc, int group)
> +{
> +    return xchg(&pi_desc->pir[group], 0);
> +}
>
> /*
> * Exit Reasons
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel