[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH] drm/xen-front: Make shmem backed display buffer coherent



On 12/14/18 10:35 AM, Daniel Vetter wrote:
On Fri, Dec 14, 2018 at 09:09:45AM +0200, Oleksandr Andrushchenko wrote:
On 12/13/18 5:48 PM, Daniel Vetter wrote:
On Thu, Dec 13, 2018 at 12:17:54PM +0200, Oleksandr Andrushchenko wrote:
Daniel, could you please comment?
Cross-reviewing someone else's stuff would scale better,
fair enough
   I don't think
I'll get around to anything before next year.
I put you on CC explicitly because you had comments on other patch [1]

and this one tries to solve the issue raised (I tried to figure out

at [2] if this is the way to go, but it seems I have no alternative here).

While at it [3] (I hope) addresses your comments and the series just

needs your single ack/nack to get in: all the rest ack/r-b are already

there. Do you mind looking at it?
As mentioned, much better if you aim for more peer review with others, not
just me. And all that dma coherency stuff isn't something I really
understand all that well (I just know we have lots of pain). For options
maybe work together with Gerd Hoffmann or Noralf Tronnes, I think either
has some patches pending that also need some review.

Fair enough,

thank you

-Daniel

-Daniel
Thank you very much for your time,

Oleksandr

Thank you

On 11/27/18 12:32 PM, Oleksandr Andrushchenko wrote:
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

When GEM backing storage is allocated with drm_gem_get_pages
the backing pages may be cached, thus making it possible that
the backend sees only partial content of the buffer which may
lead to screen artifacts. Make sure that the frontend's
memory is coherent and the backend always sees correct display
buffer content.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
---
    drivers/gpu/drm/xen/xen_drm_front_gem.c | 62 +++++++++++++++++++------
    1 file changed, 48 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c 
b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..c592735e49d2 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -33,8 +33,11 @@ struct xen_gem_object {
        /* set for buffers allocated by the backend */
        bool be_alloc;
-       /* this is for imported PRIME buffer */
-       struct sg_table *sgt_imported;
+       /*
+        * this is for imported PRIME buffer or the one allocated via
+        * drm_gem_get_pages.
+        */
+       struct sg_table *sgt;
    };
    static inline struct xen_gem_object *
@@ -77,10 +80,21 @@ static struct xen_gem_object *gem_create_obj(struct 
drm_device *dev,
        return xen_obj;
    }
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+       if (!xen_obj->pages)
+               return ERR_PTR(-ENOMEM);
+
+       return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+}
+
    static struct xen_gem_object *gem_create(struct drm_device *dev, size_t 
size)
    {
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_gem_object *xen_obj;
+       struct address_space *mapping;
        int ret;
        size = round_up(size, PAGE_SIZE);
@@ -113,10 +127,14 @@ static struct xen_gem_object *gem_create(struct 
drm_device *dev, size_t size)
                xen_obj->be_alloc = true;
                return xen_obj;
        }
+
        /*
         * need to allocate backing pages now, so we can share those
         * with the backend
         */
+       mapping = xen_obj->base.filp->f_mapping;
+       mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+
        xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
        if (IS_ERR_OR_NULL(xen_obj->pages)) {
@@ -125,8 +143,27 @@ static struct xen_gem_object *gem_create(struct drm_device 
*dev, size_t size)
                goto fail;
        }
+       xen_obj->sgt = xen_drm_front_gem_get_sg_table(&xen_obj->base);
+       if (IS_ERR_OR_NULL(xen_obj->sgt)){
+               ret = PTR_ERR(xen_obj->sgt);
+               xen_obj->sgt = NULL;
+               goto fail_put_pages;
+       }
+
+       if (!dma_map_sg(dev->dev, xen_obj->sgt->sgl, xen_obj->sgt->nents,
+                       DMA_BIDIRECTIONAL)) {
+               ret = -EFAULT;
+               goto fail_free_sgt;
+       }
+
        return xen_obj;
+fail_free_sgt:
+       sg_free_table(xen_obj->sgt);
+       xen_obj->sgt = NULL;
+fail_put_pages:
+       drm_gem_put_pages(&xen_obj->base, xen_obj->pages, true, false);
+       xen_obj->pages = NULL;
    fail:
        DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
        return ERR_PTR(ret);
@@ -149,7 +186,7 @@ void xen_drm_front_gem_free_object_unlocked(struct 
drm_gem_object *gem_obj)
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
        if (xen_obj->base.import_attach) {
-               drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+               drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt);
                gem_free_pages_array(xen_obj);
        } else {
                if (xen_obj->pages) {
@@ -158,6 +195,13 @@ void xen_drm_front_gem_free_object_unlocked(struct 
drm_gem_object *gem_obj)
                                                        xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
+                               if (xen_obj->sgt) {
+                                       dma_unmap_sg(xen_obj->base.dev->dev,
+                                                    xen_obj->sgt->sgl,
+                                                    xen_obj->sgt->nents,
+                                                    DMA_BIDIRECTIONAL);
+                                       sg_free_table(xen_obj->sgt);
+                               }
                                drm_gem_put_pages(&xen_obj->base,
                                                  xen_obj->pages, true, false);
                        }
@@ -174,16 +218,6 @@ struct page **xen_drm_front_gem_get_pages(struct 
drm_gem_object *gem_obj)
        return xen_obj->pages;
    }
-struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
-{
-       struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
-
-       if (!xen_obj->pages)
-               return ERR_PTR(-ENOMEM);
-
-       return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
-}
-
    struct drm_gem_object *
    xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
@@ -203,7 +237,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
        if (ret < 0)
                return ERR_PTR(ret);
-       xen_obj->sgt_imported = sgt;
+       xen_obj->sgt = sgt;
        ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
                                               NULL, xen_obj->num_pages);
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel
[1] https://patchwork.kernel.org/patch/10693787/

[2] https://lists.xen.org/archives/html/xen-devel/2018-11/msg02882.html

[3] https://patchwork.kernel.org/patch/10705853/



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.