Re: [Xen-devel] [PATCH v10 for-xen-4.5 2/2] dpci: Replace tasklet with an softirq
On Fri, Nov 14, 2014 at 03:13:42PM +0000, Jan Beulich wrote:
> >>> On 12.11.14 at 03:23, <konrad.wilk@xxxxxxxxxx> wrote:
> > +static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
> > +{
> > +    struct domain *d = pirq_dpci->dom;
> > +
> > +    ASSERT(spin_is_locked(&d->event_lock));
> > +
> > +    switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) )
> > +    {
> > +    case (1 << STATE_SCHED):
> > +        /*
> > +         * We are going to try to de-schedule the softirq before it goes in
> > +         * STATE_RUN. Whoever clears STATE_SCHED MUST refcount the 'dom'.
> > +         */
> > +        put_domain(d);
> > +        /* fallthrough. */
>
> Considering Sander's report, the only suspicious place I find is this
> one: When the STATE_SCHED flag is set, pirq_dpci is on some
> CPU's list. What guarantees it to get removed from that list before
> getting inserted on another one?
None. The moment that STATE_SCHED is cleared, 'raise_softirq_for'
is free to manipulate the list.
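
To make the window concrete, here is a standalone sketch of the interleaving
(not Xen code: 'struct node', 'percpu_list', 'raise_for' and 'reset' are
made-up stand-ins for hvm_pirq_dpci, the per-CPU dpci_list,
'raise_softirq_for' and 'pt_pirq_softirq_reset', using plain C11 atomics):

#include <stdio.h>
#include <stdatomic.h>

enum { STATE_SCHED, STATE_RUN };

/* Stand-in for hvm_pirq_dpci: list linkage plus the state word. */
struct node {
    struct node *next, *prev;
    atomic_ulong state;
};

/* Stand-in for the per-CPU dpci_list heads: two "CPUs". */
static struct node percpu_list[2];

static void list_init(struct node *h) { h->next = h->prev = h; }

static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

/* Shape of raise_softirq_for(): queue the entry only if STATE_SCHED was clear. */
static void raise_for(struct node *n, unsigned int cpu)
{
    if ( atomic_fetch_or(&n->state, 1UL << STATE_SCHED) & (1UL << STATE_SCHED) )
        return;
    list_add_tail(n, &percpu_list[cpu]);
}

/* Shape of pt_pirq_softirq_reset(): drop STATE_SCHED without unlinking. */
static void reset(struct node *n)
{
    unsigned long want = 1UL << STATE_SCHED;

    atomic_compare_exchange_strong(&n->state, &want, 0UL);
}

int main(void)
{
    struct node n = { .state = 0 };

    list_init(&percpu_list[0]);
    list_init(&percpu_list[1]);

    raise_for(&n, 0); /* "CPU0": n is queued on CPU0's list.              */
    reset(&n);        /* STATE_SCHED cleared, n still linked on CPU0.     */
    raise_for(&n, 1); /* "CPU1": re-queues n before CPU0 ever unlinks it. */

    printf("reachable from cpu0 list: %d, from cpu1 list: %d\n",
           percpu_list[0].next == &n, percpu_list[1].prev == &n);
    return 0;
}

Both prints come out 1: the same entry ends up reachable from both per-CPU
lists, because nothing orders the clearing of STATE_SCHED after the unlink
from the first CPU's list.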
We could:
 - Add another bit, say STATE_ZOMBIE, which 'pt_pirq_softirq_reset' could set
   and 'dpci_softirq', once it sees it, would clear. Said bit would stop
   'raise_softirq_for' from trying to do anything; a standalone sketch of the
   intended lifecycle follows the patch below.
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index efc66dc..8e9141e 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -50,20 +50,25 @@ static DEFINE_PER_CPU(struct list_head, dpci_list);
 
 enum {
     STATE_SCHED,
-    STATE_RUN
+    STATE_RUN,
+    STATE_ZOMBIE
 };
 
 /*
  * This can be called multiple times, but the softirq is only raised once.
- * That is until the STATE_SCHED state has been cleared. The state can be
- * cleared by: the 'dpci_softirq' (when it has executed 'hvm_dirq_assist'),
- * or by 'pt_pirq_softirq_reset' (which will try to clear the state before
- * the softirq had a chance to run).
+ * That is until the STATE_SCHED and STATE_ZOMBIE states have been cleared.
+ * Both can be cleared by 'dpci_softirq' (when it has executed
+ * 'hvm_dirq_assist'), while STATE_SCHED alone can also be cleared by
+ * 'pt_pirq_softirq_reset' (which will try to clear the state before the
+ * softirq had a chance to run).
  */
 static void raise_softirq_for(struct hvm_pirq_dpci *pirq_dpci)
 {
     unsigned long flags;
 
+    if ( test_bit(STATE_ZOMBIE, &pirq_dpci->state) )
+        return;
+
     if ( test_and_set_bit(STATE_SCHED, &pirq_dpci->state) )
         return;
 
@@ -85,7 +90,7 @@ static void raise_softirq_for(struct hvm_pirq_dpci *pirq_dpci)
  */
 bool_t pt_pirq_softirq_active(struct hvm_pirq_dpci *pirq_dpci)
 {
-    if ( pirq_dpci->state & ((1 << STATE_RUN) | (1 << STATE_SCHED)) )
+    if ( pirq_dpci->state & ((1 << STATE_RUN) | (1 << STATE_SCHED) | (1 << STATE_ZOMBIE)) )
         return 1;
 
     /*
@@ -109,7 +114,7 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
 
     ASSERT(spin_is_locked(&d->event_lock));
 
-    switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 0) )
+    switch ( cmpxchg(&pirq_dpci->state, 1 << STATE_SCHED, 1 << STATE_ZOMBIE) )
     {
     case (1 << STATE_SCHED):
         /*
@@ -120,6 +125,7 @@ static void pt_pirq_softirq_reset(struct hvm_pirq_dpci *pirq_dpci)
         /* fallthrough. */
     case (1 << STATE_RUN):
     case (1 << STATE_RUN) | (1 << STATE_SCHED):
+    case (1 << STATE_RUN) | (1 << STATE_SCHED) | (1 << STATE_ZOMBIE):
         /*
          * The reason it is OK to reset 'dom' when STATE_RUN bit is set is due
          * to a shortcut the 'dpci_softirq' implements. It stashes the 'dom'
@@ -779,6 +785,7 @@ unlock:
 static void dpci_softirq(void)
 {
     unsigned int cpu = smp_processor_id();
+    unsigned int reset = 0;
     LIST_HEAD(our_list);
 
     local_irq_disable();
@@ -805,7 +812,15 @@ static void dpci_softirq(void)
             hvm_dirq_assist(d, pirq_dpci);
             put_domain(d);
         }
+        else
+            reset = 1;
+
         clear_bit(STATE_RUN, &pirq_dpci->state);
+        if ( reset )
+        {
+            clear_bit(STATE_ZOMBIE, &pirq_dpci->state);
+            reset = 0;
+        }
     }
 }
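
And, again only as a standalone sketch (same caveats and made-up names as the
earlier example, with the list linkage elided), the lifecycle the patch is
aiming for:

#include <stdatomic.h>

enum { STATE_SCHED, STATE_RUN, STATE_ZOMBIE };

struct node { atomic_ulong state; };   /* list linkage elided here */

/* Shape of the patched pt_pirq_softirq_reset(): SCHED -> ZOMBIE, not 0. */
void reset_node(struct node *n)
{
    unsigned long want = 1UL << STATE_SCHED;

    atomic_compare_exchange_strong(&n->state, &want, 1UL << STATE_ZOMBIE);
}

/* Shape of the patched raise_softirq_for(): refuse to queue while the
 * zombie marker is set, so the stale list entry cannot be re-queued. */
int may_queue(struct node *n)
{
    if ( atomic_load(&n->state) & (1UL << STATE_ZOMBIE) )
        return 0;
    return !(atomic_fetch_or(&n->state, 1UL << STATE_SCHED) &
             (1UL << STATE_SCHED));
}

/* Shape of the patched dpci_softirq() tail: only the softirq, which has
 * already unlinked the entry from its own per-CPU list, clears STATE_RUN
 * and then the zombie marker, re-opening the entry for queueing. */
void softirq_done(struct node *n, int was_reset)
{
    atomic_fetch_and(&n->state, ~(1UL << STATE_RUN));
    if ( was_reset )
        atomic_fetch_and(&n->state, ~(1UL << STATE_ZOMBIE));
}

I.e. between 'pt_pirq_softirq_reset' and the next run of 'dpci_softirq' the
entry is poisoned rather than free, so 'raise_softirq_for' cannot put it on a
second per-CPU list while it is still linked on the first.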
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel