|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5] xen/arm: trap guest WFI
Trap guest WFI, block the guest VCPU unless it has pending interrupts.
Wake the guest vcpu when a new interrupt for it arrives.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Changes in v5:
- implement local_event_delivery_disable/enable;
- introduce gic_events_need_delivery, that takes into account the
current status of the lr registers;
- check whether evtchn_upcall_mask is set in
local_events_need_delivery;
- local_events_need_delivery: return false if the guest disabled irqs;
- on guest WFI we should not block the guest vcpu if any interrupts need
to be injected, even if the guest disabled interrupts.
Changes in v4:
- local_events_need_delivery: no need to return true if
evtchn_upcall_pending is set and the VGIC_IRQ_EVTCHN_CALLBACK irq is
currently inflight: it just means that events are being handled.
Changes in v3:
- check on the lr_pending list rather than the inflight list in
local_events_need_delivery;
- if evtchn_upcall_pending also check the status of the
VGIC_IRQ_EVTCHN_CALLBACK irq.
Changes in v2:
- rebased;
- implement local_events_need_delivery;
- move the case HSR_EC_WFI_WFE to do_trap_hypervisor.
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e9c84c7..e2a072b 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -457,7 +457,7 @@ int construct_dom0(struct domain *d)
v->arch.sctlr = SCTLR_BASE;
- WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM, HCR_EL2);
+ WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI,
HCR_EL2);
isb();
local_abort_enable();
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 41abdfb..ec828c8 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -544,9 +544,15 @@ static void gic_inject_irq_stop(void)
}
}
+int gic_events_need_delivery(void)
+{
+ return (!list_empty(&current->arch.vgic.lr_pending) ||
+ this_cpu(lr_mask));
+}
+
void gic_inject(void)
{
- if (!this_cpu(lr_mask))
+ if (!gic_events_need_delivery())
gic_inject_irq_stop();
else
gic_inject_irq_start();
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 600113c..af165ca 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -29,7 +29,9 @@
#include <xen/hypercall.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
+#include <public/sched.h>
#include <public/xen.h>
+#include <asm/event.h>
#include <asm/regs.h>
#include <asm/cpregs.h>
@@ -920,6 +922,19 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs
*regs)
union hsr hsr = { .bits = READ_SYSREG32(ESR_EL2) };
switch (hsr.ec) {
+ /* at the moment we only trap WFI */
+ case HSR_EC_WFI_WFE:
+ do_sched_op_compat(SCHEDOP_block, 0);
+ /* The ARM spec declares that even if local irqs are masked in
+ * the CPSR register, an irq should wake up a cpu from WFI anyway.
+ * For this reason we need to check for irqs that need delivery,
+ * ignoring the CPSR register, *after* calling SCHEDOP_block to
+ * avoid races with vgic_vcpu_inject_irq.
+ */
+ if ( _local_events_need_delivery(0) )
+ vcpu_unblock(current);
+ regs->pc += hsr.len ? 4 : 2;
+ break;
case HSR_EC_CP15_32:
if ( ! is_pv32_domain(current->domain) )
goto bad_trap;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 0d24df0..8efcefc 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -608,12 +608,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int
irq, int virtual)
{
list_add_tail(&n->inflight, &iter->inflight);
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
- return;
+ goto out;
}
}
list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
/* we have a new higher priority irq, inject it into the guest */
+out:
+ vcpu_unblock(v);
}
/*
diff --git a/xen/include/asm-arm/event.h b/xen/include/asm-arm/event.h
index 10f58af..99a2b13 100644
--- a/xen/include/asm-arm/event.h
+++ b/xen/include/asm-arm/event.h
@@ -1,27 +1,55 @@
#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__
+#include <asm/gic.h>
+#include <asm/domain.h>
+
void vcpu_kick(struct vcpu *v);
void vcpu_mark_events_pending(struct vcpu *v);
-static inline int local_events_need_delivery(void)
+static inline int _local_events_need_delivery(int check_masked)
{
- /* TODO
- * return (vcpu_info(v, evtchn_upcall_pending) &&
- !vcpu_info(v, evtchn_upcall_mask)); */
+ struct pending_irq *p = irq_to_pending(current, VGIC_IRQ_EVTCHN_CALLBACK);
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ /* guest IRQs are masked */
+ if ( check_masked && (regs->cpsr & PSR_IRQ_MASK) )
return 0;
+
+ /* XXX: if the first interrupt has already been delivered, we should
+ * check whether any higher priority interrupts are in the
+ * lr_pending queue or in the LR registers and return 1 only in that
+ * case.
+ * In practice the guest interrupt handler should run with
+ * interrupts disabled so this shouldn't be a problem in the general
+ * case.
+ */
+ if ( gic_events_need_delivery() )
+ return 1;
+
+ if ( vcpu_info(current, evtchn_upcall_pending) &&
+ !vcpu_info(current, evtchn_upcall_mask) &&
+ list_empty(&p->inflight) )
+ return 1;
+
+ return 0;
+}
+
+static inline int local_events_need_delivery(void)
+{
+ return _local_events_need_delivery(1);
}
int local_event_delivery_is_enabled(void);
static inline void local_event_delivery_disable(void)
{
- /* TODO current->vcpu_info->evtchn_upcall_mask = 1; */
+ current->vcpu_info->evtchn_upcall_mask = 1;
}
static inline void local_event_delivery_enable(void)
{
- /* TODO current->vcpu_info->evtchn_upcall_mask = 0; */
+ current->vcpu_info->evtchn_upcall_mask = 0;
}
/* No arch specific virq definition now. Default to global. */
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index c6535d1..c3e0229 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -135,6 +135,7 @@ extern void gic_route_ppis(void);
extern void gic_route_spis(void);
extern void gic_inject(void);
+extern int gic_events_need_delivery(void);
extern void __cpuinit init_maintenance_interrupt(void);
extern void gic_set_guest_irq(struct vcpu *v, unsigned int irq,
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |