[PATCH 13/18] xen/drmfront: use xenbus_setup_ring() and xenbus_teardown_ring()



Simplify drmfront's ring creation and removal by using the common
xenbus_setup_ring() and xenbus_teardown_ring() helpers instead of
open-coding the page allocation and grant handling.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c | 40 ++++++---------------
 1 file changed, 10 insertions(+), 30 deletions(-)
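
[Note, not part of the commit message: a minimal sketch of the helper
pattern this patch switches to, assuming the xenbus_setup_ring()/
xenbus_teardown_ring() prototypes from include/xen/xenbus.h as used in
the hunks below.]

	void *ring;
	grant_ref_t gref;
	int ret;

	/* Allocate the shared page, grant the backend access, fill in gref. */
	ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &ring,
				1, &gref);
	if (ret)
		return ret;

	/* ... use the ring ... */

	/* End foreign access and free the page again. */
	xenbus_teardown_ring(&ring, 1, &gref);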

diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
index 4006568b9e32..9b16ea9f523b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -123,12 +123,12 @@ static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
 static void evtchnl_free(struct xen_drm_front_info *front_info,
                         struct xen_drm_front_evtchnl *evtchnl)
 {
-       unsigned long page = 0;
+       void *page = NULL;
 
        if (evtchnl->type == EVTCHNL_TYPE_REQ)
-               page = (unsigned long)evtchnl->u.req.ring.sring;
+               page = evtchnl->u.req.ring.sring;
        else if (evtchnl->type == EVTCHNL_TYPE_EVT)
-               page = (unsigned long)evtchnl->u.evt.page;
+               page = evtchnl->u.evt.page;
        if (!page)
                return;
 
@@ -147,8 +147,7 @@ static void evtchnl_free(struct xen_drm_front_info *front_info,
                xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
 
        /* end access and free the page */
-       if (evtchnl->gref != INVALID_GRANT_REF)
-               gnttab_end_foreign_access(evtchnl->gref, page);
+       xenbus_teardown_ring(&page, 1, &evtchnl->gref);
 
        memset(evtchnl, 0, sizeof(*evtchnl));
 }
@@ -158,8 +157,7 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
                         enum xen_drm_front_evtchnl_type type)
 {
        struct xenbus_device *xb_dev = front_info->xb_dev;
-       unsigned long page;
-       grant_ref_t gref;
+       void *page;
        irq_handler_t handler;
        int ret;
 
@@ -168,44 +166,26 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
        evtchnl->index = index;
        evtchnl->front_info = front_info;
        evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
-       evtchnl->gref = INVALID_GRANT_REF;
 
-       page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
-       if (!page) {
-               ret = -ENOMEM;
+       ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
+                               1, &evtchnl->gref);
+       if (ret)
                goto fail;
-       }
 
        if (type == EVTCHNL_TYPE_REQ) {
                struct xen_displif_sring *sring;
 
                init_completion(&evtchnl->u.req.completion);
                mutex_init(&evtchnl->u.req.req_io_lock);
-               sring = (struct xen_displif_sring *)page;
+               sring = page;
                SHARED_RING_INIT(sring);
                FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
 
-               ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
-               if (ret < 0) {
-                       evtchnl->u.req.ring.sring = NULL;
-                       free_page(page);
-                       goto fail;
-               }
-
                handler = evtchnl_interrupt_ctrl;
        } else {
-               ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
-                                                 virt_to_gfn((void *)page), 0);
-               if (ret < 0) {
-                       free_page(page);
-                       goto fail;
-               }
-
-               evtchnl->u.evt.page = (struct xendispl_event_page *)page;
-               gref = ret;
+               evtchnl->u.evt.page = page;
                handler = evtchnl_interrupt_evt;
        }
-       evtchnl->gref = gref;
 
        ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
        if (ret < 0)
-- 
2.34.1