author     Dave Airlie <airlied@redhat.com>    2019-11-28 12:39:50 +1000
committer  Dave Airlie <airlied@redhat.com>    2019-11-28 14:33:01 +1000
commit     0a6cad5df541108cfd3fbd79eef48eb824c89bdc (patch)
tree       767d41bf88ec067b85fe3d87a190ef08e59a1ce7 /drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
parent     acc61b8929365e63a3e8c8c8913177795aa45594 (diff)
parent     9ca7d19ff8ba6207bccab46536814fe4839df80a (diff)
Merge branch 'vmwgfx-coherent' of git://people.freedesktop.org/~thomash/linux into drm-next
Graphics APIs like OpenGL 4.4 and Vulkan require the graphics driver to provide coherent graphics memory, meaning that the GPU sees any content written to the coherent memory on the next GPU operation that touches that memory, and the CPU sees any content written by the GPU to that memory immediately after any fence object trailing the GPU operation is signaled.

Paravirtual drivers that otherwise require explicit synchronization need to do this by hooking up dirty tracking to pagefault handlers and buffer object validation.

Provide the mm helpers needed for this, which also allow for huge pmd and pud entries (patches 1-3), and the associated vmwgfx code (patches 4-7).

The code has been tested and exercised by a tailored version of Mesa where we disable all explicit synchronization and assume graphics memory is coherent. The performance loss varies, of course; a typical number is around 5%.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas_os@shipmail.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20191113131639.4653-1-thomas_os@shipmail.org
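The dirty-tracking idea the message describes — catch the first CPU write to a page in the fault path, record the page as dirty, then let the write proceed — can be illustrated with a minimal userspace analog using mprotect() and a SIGSEGV handler. This is only a sketch of the concept, not vmwgfx or mm code from the series; calling mprotect() from a signal handler is also a simplification that is not strictly async-signal-safe.

/*
 * Hedged userspace analog of write-protect based dirty tracking.
 * None of these names come from the patches; the series implements
 * the kernel-side equivalent via new mm helpers and pagefault hooks.
 */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static long pagesize;
static volatile sig_atomic_t dirty;

static void wp_handler(int sig, siginfo_t *info, void *ctx)
{
	uintptr_t base = (uintptr_t)info->si_addr & ~(uintptr_t)(pagesize - 1);

	(void)sig;
	(void)ctx;
	dirty = 1;	/* record the write... */
	/* ...then drop the write protection so the faulting store can retry. */
	mprotect((void *)base, pagesize, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = wp_handler,
		.sa_flags = SA_SIGINFO,
	};
	unsigned char *page;

	pagesize = sysconf(_SC_PAGESIZE);
	sigaction(SIGSEGV, &sa, NULL);

	page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return EXIT_FAILURE;

	/* "Clean" pass: write-protect the page so the next CPU write faults. */
	mprotect(page, pagesize, PROT_READ);

	page[0] = 0x42;			/* faults once, marks the page dirty */
	printf("dirty = %d\n", dirty);	/* prints: dirty = 1 */
	return 0;
}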
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_bo.c')
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 74016a08d118..8b71bf6b58ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 
+	WARN_ON(vmw_bo->dirty);
+	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
 	vmw_bo_unmap(vmw_bo);
 	kfree(vmw_bo);
 }
@@ -475,8 +477,11 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
 
-	vmw_bo_unmap(&vmw_user_bo->vbo);
+	WARN_ON(vbo->dirty);
+	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+	vmw_bo_unmap(vbo);
 	ttm_prime_object_kfree(vmw_user_bo, prime);
 }
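Both destructors first downcast the TTM object to the driver wrapper before asserting the teardown invariants (no dirty tracker left behind, no resources still attached). The vmw_buffer_object()/vmw_user_buffer_object() accessors used above are container_of()-style helpers; a hedged sketch of the embedding, with trimmed stand-in structs rather than the real definitions:

/*
 * Illustrative stand-ins only: the real vmwgfx structs carry many
 * more members, and "_sketch" names are not from the driver.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>

struct vmw_buffer_object_sketch {
	struct ttm_buffer_object base;
	struct rb_root res_tree;	/* resources attached to this BO */
	void *dirty;			/* dirty-tracking state, if any */
};

struct vmw_user_buffer_object_sketch {
	struct vmw_buffer_object_sketch vbo;
	/* prime/TTM object bookkeeping follows here in the real struct */
};

static inline struct vmw_buffer_object_sketch *
vmw_buffer_object_sketch(struct ttm_buffer_object *bo)
{
	/* Recover the wrapper from the embedded TTM object. */
	return container_of(bo, struct vmw_buffer_object_sketch, base);
}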
@@ -511,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
 	vmw_bo->base.priority = 3;
-
-	INIT_LIST_HEAD(&vmw_bo->res_list);
+	vmw_bo->res_tree = RB_ROOT;
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
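The last hunk replaces the per-BO resource list with an rbtree root, which is what the new RB_EMPTY_ROOT() checks in the destructors test against. A hedged sketch of the standard kernel rbtree insertion idiom such a list-to-tree migration relies on; the field and helper names here are illustrative, not taken from the series:

#include <linux/rbtree.h>

struct bo_resource_sketch {
	struct rb_node res_node;	/* linkage in the BO's res_tree */
	unsigned long res_start;	/* sort key, e.g. offset within the BO */
};

/* Insert @res into @res_tree, ordered by res_start. */
static void bo_resource_attach_sketch(struct rb_root *res_tree,
				      struct bo_resource_sketch *res)
{
	struct rb_node **new = &res_tree->rb_node, *parent = NULL;

	/* Walk down to the leaf slot where the new node belongs. */
	while (*new) {
		struct bo_resource_sketch *cur =
			rb_entry(*new, struct bo_resource_sketch, res_node);

		parent = *new;
		new = res->res_start < cur->res_start ?
			&(*new)->rb_left : &(*new)->rb_right;
	}
	rb_link_node(&res->res_node, parent, new);
	rb_insert_color(&res->res_node, res_tree);
}

Traversal then uses rb_first()/rb_next() instead of list iteration, and a fully detached BO leaves res_tree equal to RB_ROOT — exactly the empty-tree condition the destructors warn on.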