# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Node ID 4f7ee25edd8141b6fa44f385d10cef62e18a2bb9
# Parent  e0e68942b205b395fe99f7a338a448ddadd9c9fd
change semantics of grant transfers for vp guests so that the operation
automatically gets you a fresh page at the same pseudo-physical address
as Keir suggested.

Signed-off-by: Isaku Yamahata

diff -r e0e68942b205 -r 4f7ee25edd81 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Apr 28 16:44:00 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Sat Apr 29 13:17:03 2006 +0900
@@ -235,23 +235,35 @@ static void net_rx_action(unsigned long
 		vdata   = (unsigned long)skb->data;
 		old_mfn = virt_to_mfn(vdata);
 
-		/* Memory squeeze? Back off for an arbitrary while. */
-		if ((new_mfn = alloc_mfn()) == 0) {
-			if ( net_ratelimit() )
-				WPRINTK("Memory squeeze in netback driver.\n");
-			mod_timer(&net_timer, jiffies + HZ);
-			skb_queue_head(&rx_queue, skb);
-			break;
-		}
-		/*
-		 * Set the new P2M table entry before reassigning the old data
-		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
-		 */
-		set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
-
-		MULTI_update_va_mapping(mcl, vdata,
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* Memory squeeze? Back off for an arbitrary while. */
+			if ((new_mfn = alloc_mfn()) == 0) {
+				if ( net_ratelimit() )
+					WPRINTK("Memory squeeze in netback "
+						"driver.\n");
+				mod_timer(&net_timer, jiffies + HZ);
+				skb_queue_head(&rx_queue, skb);
+				break;
+			}
+			/*
+			 * Set the new P2M table entry before
+			 * reassigning the old data page.
+			 * Heed the comment in
+			 * pgtable-2level.h:pte_page(). :-)
+			 */
+			set_phys_to_machine(
+				__pa(skb->data) >> PAGE_SHIFT,
+				new_mfn);
+
+			MULTI_update_va_mapping(mcl, vdata,
 					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
-		mcl++;
+			mcl++;
+
+			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
+				MMU_MACHPHYS_UPDATE;
+			mmu->val = __pa(vdata) >> PAGE_SHIFT;
+			mmu++;
+		}
 
 		gop->mfn = old_mfn;
 		gop->domid = netif->domid;
@@ -260,13 +272,6 @@ static void net_rx_action(unsigned long
 		netif->rx.req_cons++;
 		gop++;
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-				MMU_MACHPHYS_UPDATE;
-			mmu->val = __pa(vdata) >> PAGE_SHIFT;
-			mmu++;
-		}
-
 		__skb_queue_tail(&rxq, skb);
 
 		/* Filled the batch queue? */
@@ -274,22 +279,24 @@ static void net_rx_action(unsigned long
 			break;
 	}
 
-	if (mcl == rx_mcl)
-		return;
-
-	mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-
-	if (mmu - rx_mmu) {
-		mcl->op = __HYPERVISOR_mmu_update;
-		mcl->args[0] = (unsigned long)rx_mmu;
-		mcl->args[1] = mmu - rx_mmu;
-		mcl->args[2] = 0;
-		mcl->args[3] = DOMID_SELF;
-		mcl++;
-	}
-
-	ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-	BUG_ON(ret != 0);
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		if (mcl == rx_mcl)
+			return;
+
+		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+
+		if (mmu - rx_mmu) {
+			mcl->op = __HYPERVISOR_mmu_update;
+			mcl->args[0] = (unsigned long)rx_mmu;
+			mcl->args[1] = mmu - rx_mmu;
+			mcl->args[2] = 0;
+			mcl->args[3] = DOMID_SELF;
+			mcl++;
+		}
+
+		ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+		BUG_ON(ret != 0);
+	}
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
 				      gop - grant_rx_op);
@@ -308,8 +315,11 @@ static void net_rx_action(unsigned long
 		netif->stats.tx_bytes += size;
 		netif->stats.tx_packets++;
 
-		/* The update_va_mapping() must not fail. */
-		BUG_ON(mcl->result != 0);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* The update_va_mapping() must not fail. */
+			BUG_ON(mcl->result != 0);
+			mcl++;
+		}
 
 		/* Check the reassignment error code. */
 		status = NETIF_RSP_OKAY;
@@ -340,7 +350,6 @@ static void net_rx_action(unsigned long
 		netif_put(netif);
 		dev_kfree_skb(skb);
 
-		mcl++;
 		gop++;
 	}
 }