[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v2] drm/xen-front: Make shmem backed display buffer coherent



From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

When GEM backing storage is allocated with drm_gem_get_pages,
the backing pages may be cached, thus making it possible that
the backend sees only partial content of the buffer, which may
lead to screen artifacts. Make sure that the frontend's
memory is coherent and the backend always sees the correct
display buffer content.

Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend")

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

---
Changes since v1:
 - Remove GFP_USER|__GFP_DMA32 mapping flags (Gerd)
 - Use drm_prime_pages_to_sg directly (Noralf)

 drivers/gpu/drm/xen/xen_drm_front_gem.c | 38 ++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c 
b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 28bc501af450..0b0d9b4f97dc 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -32,8 +32,11 @@ struct xen_gem_object {
        /* set for buffers allocated by the backend */
        bool be_alloc;
 
-       /* this is for imported PRIME buffer */
-       struct sg_table *sgt_imported;
+       /*
+        * this is for imported PRIME buffer or the one allocated via
+        * drm_gem_get_pages.
+        */
+       struct sg_table *sgt;
 };
 
 static inline struct xen_gem_object *
@@ -124,8 +127,28 @@ static struct xen_gem_object *gem_create(struct drm_device 
*dev, size_t size)
                goto fail;
        }
 
+       xen_obj->sgt = drm_prime_pages_to_sg(xen_obj->pages,
+                                            xen_obj->num_pages);
+       if (IS_ERR_OR_NULL(xen_obj->sgt)) {
+               ret = PTR_ERR(xen_obj->sgt);
+               xen_obj->sgt = NULL;
+               goto fail_put_pages;
+       }
+
+       if (!dma_map_sg(dev->dev, xen_obj->sgt->sgl, xen_obj->sgt->nents,
+                       DMA_BIDIRECTIONAL)) {
+               ret = -EFAULT;
+               goto fail_free_sgt;
+       }
+
        return xen_obj;
 
+fail_free_sgt:
+       sg_free_table(xen_obj->sgt);
+       xen_obj->sgt = NULL;
+fail_put_pages:
+       drm_gem_put_pages(&xen_obj->base, xen_obj->pages, true, false);
+       xen_obj->pages = NULL;
 fail:
        DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
        return ERR_PTR(ret);
@@ -148,7 +171,7 @@ void xen_drm_front_gem_free_object_unlocked(struct 
drm_gem_object *gem_obj)
        struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
 
        if (xen_obj->base.import_attach) {
-               drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+               drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt);
                gem_free_pages_array(xen_obj);
        } else {
                if (xen_obj->pages) {
@@ -157,6 +180,13 @@ void xen_drm_front_gem_free_object_unlocked(struct 
drm_gem_object *gem_obj)
                                                        xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
+                               if (xen_obj->sgt) {
+                                       dma_unmap_sg(xen_obj->base.dev->dev,
+                                                    xen_obj->sgt->sgl,
+                                                    xen_obj->sgt->nents,
+                                                    DMA_BIDIRECTIONAL);
+                                       sg_free_table(xen_obj->sgt);
+                               }
                                drm_gem_put_pages(&xen_obj->base,
                                                  xen_obj->pages, true, false);
                        }
@@ -202,7 +232,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
        if (ret < 0)
                return ERR_PTR(ret);
 
-       xen_obj->sgt_imported = sgt;
+       xen_obj->sgt = sgt;
 
        ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
                                               NULL, xen_obj->num_pages);
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.