/* Xen paravirtualized CAN bus frontend driver. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/interface/io/ring.h>
#include "xen_pvcan.h"	/* assumed driver-local header defining the pvcan rings and request/response structs */

struct pvcan_private {
	struct can_priv can;	/* must be the first member! */
	struct xenbus_device *xendev;
	struct net_device *candev;
	int irq_rx;
	int irq_tx;
	unsigned int evtchn_rx;
	unsigned int evtchn_tx;
	/* RX ring: frontend -> backend (input to dom0) */
	struct xen_pvcan_rx_front_ring rx_ring;
	grant_ref_t rx_ring_ref;
	/* TX ring: backend -> frontend (output from dom0) */
	struct xen_pvcan_tx_front_ring tx_ring;
	grant_ref_t tx_ring_ref;
};

static netdev_tx_t pvcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *cfd = (struct can_frame *)skb->data;
	struct net_device_stats *stats = &dev->stats;
	struct pvcan_private *priv = netdev_priv(dev);
	struct pvcan_request *req;
	bool notify;

	if (can_dropped_invalid_skb(dev, skb)) {
		/* the helper has already freed the skb */
		netdev_dbg(dev, "dropped invalid CAN frame\n");
		stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/*
	 * Stop the queue before the shared ring overflows; the backend
	 * wakes it again via the tx event channel.
	 */
	if (abs(priv->rx_ring.sring->req_prod - priv->rx_ring.sring->req_event) >=
	    RING_SIZE(&priv->rx_ring) - 2)
		netif_stop_queue(dev);

	req = RING_GET_REQUEST(&priv->rx_ring, priv->rx_ring.req_prod_pvt);

	stats->tx_packets++;
	stats->tx_bytes += cfd->len;

	barrier();
	memcpy(&req->cfd, cfd, sizeof(*cfd));
	wmb();

	priv->rx_ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&priv->rx_ring, notify);
	if (notify)
		notify_remote_via_evtchn(priv->evtchn_rx);

	consume_skb(skb);
	return NETDEV_TX_OK;
}

/*
 * The backend signals this event channel when it has drained the ring
 * and the transmit queue may be restarted.
 */
static irqreturn_t txqueue_ready_interrupt(int irq, void *data)
{
	struct pvcan_private *priv = data;

	netif_wake_queue(priv->candev);
	return IRQ_HANDLED;
}

static int pvcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int pvcan_open(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
	netif_start_queue(dev);
	return 0;
}

static int pvcan_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static const struct net_device_ops pvcan_netdev_ops = {
	.ndo_open	= pvcan_open,
	.ndo_stop	= pvcan_stop,
	.ndo_start_xmit	= pvcan_tx,
	.ndo_change_mtu	= pvcan_change_mtu,
};

/* Deliver CAN frames received from the backend to the network stack. */
static irqreturn_t pvcan_interrupt(int irq, void *data)
{
	struct pvcan_private *priv = data;
	struct net_device *candev = priv->candev;
	struct net_device_stats *stats = &candev->stats;
	RING_IDX rcons, rprod;
	struct sk_buff *skb;
	struct can_frame *new_cfd;
	struct pvcan_response *rsp;
	int more_to_do;

read_more_responses:
	rcons = priv->tx_ring.rsp_cons;
	rprod = priv->tx_ring.sring->rsp_prod;

	if (unlikely(!netif_carrier_ok(candev)))
		return IRQ_HANDLED;

	rmb();	/* make sure we see responses up to rprod */

	while (rcons != rprod) {
		rsp = RING_GET_RESPONSE(&priv->tx_ring, rcons);

		skb = alloc_can_skb(candev, &new_cfd);
		if (unlikely(!skb || !new_cfd)) {
			/*
			 * Drop the frame but keep consuming responses so the
			 * ring does not stall on an allocation failure.
			 */
			stats->rx_dropped++;
		} else {
			skb->pkt_type = PACKET_BROADCAST;
			skb->dev = candev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			memcpy(skb->data, &rsp->cfd, sizeof(rsp->cfd));

			if (can_dropped_invalid_skb(candev, skb)) {
				stats->rx_dropped++;
			} else {
				netif_rx(skb);
				stats->rx_packets++;
				stats->rx_bytes += rsp->cfd.len;
			}
		}

		/* Hand the consumed slot back to the backend as a new request. */
		(void)RING_GET_REQUEST(&priv->tx_ring, priv->tx_ring.req_prod_pvt);
		priv->tx_ring.req_prod_pvt++;

		priv->tx_ring.rsp_cons = ++rcons;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&priv->tx_ring, more_to_do);
	if (more_to_do)
		goto read_more_responses;
	return IRQ_HANDLED;
}

static int xen_pvcan_front_probe(struct xenbus_device *dev,
				 const struct xenbus_device_id *id)
{
	return 0;
}

static int pvcan_connect(struct xenbus_device *dev)
{
	struct pvcan_private *priv;
	struct net_device *candev;
	struct xenbus_transaction xbt;
	struct xen_pvcan_rx_sring *rx_sring;
	struct xen_pvcan_tx_sring *tx_sring;
	grant_ref_t rx_gref, tx_gref;
	int rc;

	candev = alloc_candev(sizeof(struct pvcan_private), 1);
	if (!candev) {
		dev_err(&dev->dev, "failed to allocate CAN device\n");
		rc = -ENOMEM;
		goto fail1;
	}
	priv = netdev_priv(candev);
	dev_set_drvdata(&dev->dev, priv);
	priv->xendev = dev;
	priv->candev = candev;

	/* Set up the RX ring (frontend -> backend). */
	rx_sring = (struct xen_pvcan_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rx_sring) {
		rc = -ENOMEM;
		goto alloc_shr_fail;
	}
	SHARED_RING_INIT(rx_sring);
	FRONT_RING_INIT(&priv->rx_ring, rx_sring, PAGE_SIZE);

	rc = xenbus_grant_ring(dev, rx_sring, 1, &rx_gref);
	if (rc < 0)
		goto grant_rx_ring_fail;
	priv->rx_ring_ref = rx_gref;
	dev_dbg(&dev->dev, "rx-ring-ref = %u\n", rx_gref);

	/* Set up the TX ring (backend -> frontend). */
	tx_sring = (struct xen_pvcan_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!tx_sring) {
		rc = -ENOMEM;
		goto grant_rx_ring_fail;
	}
	SHARED_RING_INIT(tx_sring);
	FRONT_RING_INIT(&priv->tx_ring, tx_sring, PAGE_SIZE);

	rc = xenbus_grant_ring(dev, tx_sring, 1, &tx_gref);
	if (rc < 0)
		goto grant_tx_ring_fail;
	priv->tx_ring_ref = tx_gref;
	dev_dbg(&dev->dev, "tx-ring-ref = %u\n", tx_gref);

	/* Set up the event channels. */
	rc = xenbus_alloc_evtchn(dev, &priv->evtchn_rx);
	if (rc)
		goto grant_tx_ring_fail;

	rc = xenbus_alloc_evtchn(dev, &priv->evtchn_tx);
	if (rc)
		goto irq_bind_fail;

	rc = xenbus_transaction_start(&xbt);
	if (rc) {
		dev_err(&dev->dev, "could not start xenstore transaction\n");
		goto irq_bind_fail;
	}

	rc = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			   priv->tx_ring_ref);
	if (rc) {
		dev_err(&dev->dev, "could not write tx-ring-ref (%d)\n", rc);
		goto xenbus_abort_transaction;
	}

	rc = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			   priv->rx_ring_ref);
	if (rc) {
		dev_err(&dev->dev, "could not write rx-ring-ref (%d)\n", rc);
		goto xenbus_abort_transaction;
	}

	rc = xenbus_printf(xbt, dev->nodename, "event-channel-rx", "%u",
			   priv->evtchn_rx);
	if (rc) {
		dev_err(&dev->dev, "could not write event-channel-rx (%d)\n", rc);
		goto xenbus_abort_transaction;
	}

	rc = xenbus_printf(xbt, dev->nodename, "event-channel-tx", "%u",
			   priv->evtchn_tx);
	if (rc) {
		dev_err(&dev->dev, "could not write event-channel-tx (%d)\n", rc);
		goto xenbus_abort_transaction;
	}

	rc = xenbus_transaction_end(xbt, 0);
	if (rc) {
		dev_err(&dev->dev, "could not end xenstore transaction (%d)\n", rc);
		goto irq_bind_fail;
	}

	rc = bind_evtchn_to_irqhandler(priv->evtchn_rx, pvcan_interrupt, 0,
				       "pvcan-rx", priv);
	if (rc <= 0) {
		dev_err(&dev->dev, "failed to bind rx event channel (%d)\n", rc);
		rc = rc ?: -EINVAL;
		goto irq_bind_fail;
	}
	priv->irq_rx = rc;

	rc = bind_evtchn_to_irqhandler(priv->evtchn_tx, txqueue_ready_interrupt, 0,
				       "pvcan-tx", priv);
	if (rc <= 0) {
		dev_err(&dev->dev, "failed to bind tx event channel (%d)\n", rc);
		rc = rc ?: -EINVAL;
		goto register_dev_fail;
	}
	priv->irq_tx = rc;
	dev_dbg(&candev->dev, "pvcan irqs: rx = %d, tx = %d\n",
		priv->irq_rx, priv->irq_tx);

	/* Create the pvcan interface. */
	candev->type = ARPHRD_CAN;
	candev->mtu = CAN_MTU;
	candev->hard_header_len = 0;
	candev->addr_len = 0;
	candev->tx_queue_len = 16;
	candev->flags = IFF_NOARP;
	candev->netdev_ops = &pvcan_netdev_ops;
	candev->needs_free_netdev = true;

	rc = register_candev(candev);
	if (rc < 0)
		goto register_dev_fail;

	return 0;

xenbus_abort_transaction:
	dev_err(&dev->dev, "failed to configure xenstore\n");
	xenbus_transaction_end(xbt, 1);
register_dev_fail:
	if (priv->irq_tx > 0) {
		unbind_from_irqhandler(priv->irq_tx, priv);
		priv->irq_tx = 0;
	}
	if (priv->irq_rx > 0) {
		unbind_from_irqhandler(priv->irq_rx, priv);
		priv->irq_rx = 0;
	}
irq_bind_fail:
	if (priv->evtchn_rx)
		xenbus_free_evtchn(dev, priv->evtchn_rx);
	if (priv->evtchn_tx)
		xenbus_free_evtchn(dev, priv->evtchn_tx);
grant_tx_ring_fail:
	free_page((unsigned long)tx_sring);
grant_rx_ring_fail:
	free_page((unsigned long)rx_sring);
alloc_shr_fail:
	free_candev(candev);	/* priv is embedded in the candev allocation */
fail1:
	return rc;
}

static int pvcan_disconnect(struct xenbus_device *dev)
{
	struct pvcan_private *priv;
	struct net_device *candev;

	priv = dev_get_drvdata(&dev->dev);
	if (!priv)
		return 0;

	candev = priv->candev;
	if (!candev)
		return 0;

	if (priv->irq_rx > 0) {
		unbind_from_irqhandler(priv->irq_rx, priv);
		priv->irq_rx = 0;
	}
	if (priv->irq_tx > 0) {
		unbind_from_irqhandler(priv->irq_tx, priv);
		priv->irq_tx = 0;
	}

	if (priv->rx_ring_ref != 0) {
		gnttab_end_foreign_access(priv->rx_ring_ref, 0,
					  (unsigned long)priv->rx_ring.sring);
		priv->rx_ring_ref = 0;
	}
	priv->rx_ring.sring = NULL;

	if (priv->tx_ring_ref != 0) {
		gnttab_end_foreign_access(priv->tx_ring_ref, 0,
					  (unsigned long)priv->tx_ring.sring);
		priv->tx_ring_ref = 0;
	}
	priv->tx_ring.sring = NULL;

	unregister_netdev(candev);
	priv->candev = NULL;

	return 0;
}

static void xen_pvcan_front_otherend_changed(struct xenbus_device *dev,
					     enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialising:
		xenbus_switch_state(dev, XenbusStateInitialising);
		break;

	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (pvcan_connect(dev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		pr_info("pvcan: backend reports it is connected as well\n");
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;
	case XenbusStateClosing:
		pvcan_disconnect(dev);
		xenbus_frontend_closed(dev);
		break;

	default:
		break;
	}
}

static const struct xenbus_device_id xen_pvcan_front_ids[] = {
	{ "pvcan" },
	{ "" }
};

static struct xenbus_driver xen_pvcan_front_driver = {
	.ids = xen_pvcan_front_ids,
	.probe = xen_pvcan_front_probe,
	.otherend_changed = xen_pvcan_front_otherend_changed,
};

static int __init xen_pvcan_front_init(void)
{
	pr_notice("XEN PV CAN frontend driver init\n");
	return xenbus_register_frontend(&xen_pvcan_front_driver);
}

static void __exit xen_pvcan_front_exit(void)
{
	xenbus_unregister_driver(&xen_pvcan_front_driver);
}

module_init(xen_pvcan_front_init);
module_exit(xen_pvcan_front_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:pvcan-front");
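MODULE_DESCRIPTION("Xen paravirtualized CAN bus frontend driver");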