[Xen-changelog] [linux-2.6.18-xen] netback: take net_schedule_list_lock when removing entry from net_schedule_list
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1288772442 0
# Node ID bd2bf7a8468657da58c5039a2dfb573afb7e004d
# Parent f4357b64a3cecb7fea425d7bddbef0c91ab79234
netback: take net_schedule_list_lock when removing entry from net_schedule_list
There is a race in net_tx_build_mops between checking whether
net_schedule_list is empty and actually dequeuing the first entry on
the list. If another thread dequeues the only entry on the list during
this window, we crash, because list_first_entry expects a non-empty
list. Therefore, after the initial lock-free check for an empty list,
check again with the lock held before dequeuing the entry.
Based on a patch by Tomasz Wroblewski.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
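
As a standalone illustration of the pattern this patch adopts, the
sketch below re-creates the race-free dequeue in plain userspace C:
the emptiness check and the removal of the first entry happen under a
single lock, so no other thread can empty the list in between. This is
a minimal sketch, assuming pthreads in place of the kernel spinlock;
the names entry_t, sched_list and poll_sched_list are illustrative
only and are not netback's.

#include <pthread.h>
#include <stddef.h>

typedef struct entry {
	struct entry *next, *prev;	/* intrusive circular list links */
} entry_t;

/* Empty ring: the head points at itself, as with the kernel's LIST_HEAD(). */
static entry_t sched_list = { &sched_list, &sched_list };
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static int sched_list_empty(void)
{
	return sched_list.next == &sched_list;
}

/*
 * Analogue of poll_net_schedule_list(): the emptiness check and the
 * dequeue are done under the same lock, so the "first entry" cannot
 * disappear between the two.  NULL means there was nothing to take.
 */
static entry_t *poll_sched_list(void)
{
	entry_t *e = NULL;

	pthread_mutex_lock(&sched_lock);
	if (!sched_list_empty()) {
		e = sched_list.next;		/* first entry */
		e->prev->next = e->next;	/* list_del() by hand */
		e->next->prev = e->prev;
		e->next = e->prev = NULL;
	}
	pthread_mutex_unlock(&sched_lock);
	return e;
}

A caller that receives NULL simply retries or moves on, which is
exactly what the new "if (!netif) continue;" in net_tx_action does.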
---
drivers/xen/netback/netback.c | 25 +++++++++++++++++++------
1 files changed, 19 insertions(+), 6 deletions(-)
diff -r f4357b64a3ce -r bd2bf7a84686 drivers/xen/netback/netback.c
--- a/drivers/xen/netback/netback.c	Fri Oct 29 10:23:16 2010 +0100
+++ b/drivers/xen/netback/netback.c	Wed Nov 03 08:20:42 2010 +0000
@@ -784,15 +784,28 @@ static int __on_net_schedule_list(netif_
 	return netif->list.next != NULL;
 }
 
+/* Must be called with net_schedule_list_lock held. */
 static void remove_from_net_schedule_list(netif_t *netif)
 {
-	spin_lock_irq(&net_schedule_list_lock);
 	if (likely(__on_net_schedule_list(netif))) {
 		list_del(&netif->list);
 		netif->list.next = NULL;
 		netif_put(netif);
 	}
+}
+
+static netif_t *poll_net_schedule_list(void)
+{
+	netif_t *netif = NULL;
+
+	spin_lock_irq(&net_schedule_list_lock);
+	if (!list_empty(&net_schedule_list)) {
+		netif = list_first_entry(&net_schedule_list, netif_t, list);
+		netif_get(netif);
+		remove_from_net_schedule_list(netif);
+	}
 	spin_unlock_irq(&net_schedule_list_lock);
+	return netif;
 }
 
 static void add_to_net_schedule_list_tail(netif_t *netif)
@@ -837,7 +850,9 @@ void netif_schedule_work(netif_t *netif)
 }
 
 void netif_deschedule_work(netif_t *netif)
 {
+	spin_lock_irq(&net_schedule_list_lock);
 	remove_from_net_schedule_list(netif);
+	spin_unlock_irq(&net_schedule_list_lock);
 }
 
@@ -1224,7 +1239,6 @@ static int netbk_set_skb_gso(struct sk_b
 /* Called after netfront has transmitted */
 static void net_tx_action(unsigned long unused)
 {
-	struct list_head *ent;
 	struct sk_buff *skb;
 	netif_t *netif;
 	netif_tx_request_t txreq;
@@ -1242,10 +1256,9 @@ static void net_tx_action(unsigned long
 	while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
 	       !list_empty(&net_schedule_list)) {
 		/* Get a netif from the list with work to do. */
-		ent = net_schedule_list.next;
-		netif = list_entry(ent, netif_t, list);
-		netif_get(netif);
-		remove_from_net_schedule_list(netif);
+		netif = poll_net_schedule_list();
+		if (!netif)
+			continue;
 
 		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
 		if (!work_to_do) {