From 71df0368e9b66afeb1fdb92a88be1a98cc25f310 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 25 May 2021 17:10:50 +0200 Subject: drm/amdgpu: Implement mmap as GEM object function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving the driver-specific mmap code into a GEM object function allows for using DRM helpers for various mmap callbacks. This change resolves several inconsistencies between regular mmap and prime-based mmap. The vm_ops field in vma is now set for all mmap'ed areas. Previously it was only set for regular mmap calls; prime-based mmap used TTM's default vm_ops. The function amdgpu_verify_access() is no longer called and is therefore removed by this patch. As a side effect, amdgpu_ttm_vm_ops and amdgpu_ttm_fault() are now implemented in amdgpu's GEM code. v4: * rebased v3: * rename mmap function to amdgpu_gem_object_mmap() (Christian) * remove unnecessary checks from mmap (Christian) v2: * rename amdgpu_ttm_vm_ops and amdgpu_ttm_fault() to amdgpu_gem_vm_ops and amdgpu_gem_fault() (Christian) * the check for kfd_bo has meanwhile been removed Signed-off-by: Thomas Zimmermann Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20210525151055.8174-3-tzimmermann@suse.de --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 55 +++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 18974bd081f0..73c76a3e2b12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -32,6 +32,7 @@ #include #include +#include <drm/drm_gem_ttm_helper.h> #include #include "amdgpu.h" @@ -41,6 +42,46 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs; +static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) +{ + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct drm_device *ddev = bo->base.dev; + vm_fault_t ret; + int idx; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(ddev, &idx)) { + ret = amdgpu_bo_fault_reserve_notify(bo); + if (ret) { + drm_dev_exit(idx); + goto unlock; + } + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +unlock: + dma_resv_unlock(bo->base.resv); + return ret; +} + +static const struct vm_operations_struct amdgpu_gem_vm_ops = { + .fault = amdgpu_gem_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + static void amdgpu_gem_object_free(struct drm_gem_object *gobj) { struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); @@ -205,6 +246,18 @@ out_unlock: ttm_eu_backoff_reservation(&ticket, &list); } +static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) + return -EPERM; + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + return -EPERM; + + return drm_gem_ttm_mmap(obj, vma); +} + static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .free = amdgpu_gem_object_free, .open = amdgpu_gem_object_open, @@ -212,6 +265,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .export = amdgpu_gem_prime_export, .vmap =
drm_gem_ttm_vmap, .vunmap = drm_gem_ttm_vunmap, + .mmap = amdgpu_gem_object_mmap, + .vm_ops = &amdgpu_gem_vm_ops, }; /* -- cgit v1.2.3 From 075e8080c1a7571563171a07fa9ce47c4bc80044 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Tue, 1 Jun 2021 18:36:34 -0400 Subject: drm/amdgpu: Add table_freed parameter to amdgpu_vm_bo_update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the table_freed flag through to KFD and optimize its handling in amdgpu_vm_bo_update_mapping. Signed-off-by: Eric Huang Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index f6a8f0c5a52f..89ebbf363e27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); if (r) return r; @@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; @@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 73c76a3e2b12..88ea77b1e68a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -613,7 +613,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) goto error; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8d218c5cfee8..bdea27909885 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1768,7 +1768,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, r = vm->update_funcs->commit(&params, fence); if (table_freed) - *table_freed = params.table_freed; + *table_freed = *table_freed || params.table_freed; error_unlock: amdgpu_vm_eviction_unlock(vm); @@ -1833,7 +1833,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * 0 for success, -EINVAL for failure.
*/ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear) + bool clear, bool *table_freed) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; @@ -1912,7 +1912,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, resv, mapping->start, mapping->last, update_flags, mapping->offset, mem, - pages_addr, last_update, NULL); + pages_addr, last_update, table_freed); if (r) return r; } @@ -2165,7 +2165,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; } @@ -2184,7 +2184,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear); + r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index bee439dd673a..1f089da1e615 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -413,7 +413,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear); + bool clear, bool *table_freed); bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); -- cgit v1.2.3 From 6edbd6abb783d54f6ac4c3ed5cd9e50cff6c15e9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 10 May 2021 16:14:09 +0200 Subject: dma-buf: rename and cleanup dma_resv_get_excl v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the comment needs to state explicitly that this doesn't get a reference to the object then the function is named rather badly. Rename the function and use rcu_dereference_check(), this way it can be used from both rcu as well as lock protected critical sections. 
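[ Editor's note: the mechanism behind this rename is rcu_dereference_check(),
which takes an extra lockdep expression that, when true, makes the access
legal without rcu_read_lock(). A minimal sketch of the pattern with a
hypothetical object type (the real accessor operates on struct dma_resv and
appears in the include/linux/dma-resv.h hunk below):

	#include <linux/dma-fence.h>
	#include <linux/mutex.h>
	#include <linux/rcupdate.h>

	struct obj {
		struct mutex lock;
		struct dma_fence __rcu *excl;
	};

	/* Valid under rcu_read_lock() OR with obj->lock held;
	 * lockdep complains about every other calling context. */
	static struct dma_fence *obj_excl_fence(struct obj *o)
	{
		return rcu_dereference_check(o->excl,
					     lockdep_is_held(&o->lock));
	}

A caller holding the lock may use the returned pointer directly, while an
RCU-side caller still has to pin it with dma_fence_get_rcu() before leaving
the read-side critical section. ]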
v2: improve kerneldoc as suggested by Daniel v3: use dma_resv_excl_fence as function name Signed-off-by: Christian König Acked-by: Daniel Vetter Reviewed-by: Jason Ekstrand Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-4-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 5 ++--- drivers/dma-buf/dma-resv.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 3 +-- drivers/gpu/drm/msm/msm_gem.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- drivers/gpu/drm/radeon/radeon_display.c | 2 +- drivers/gpu/drm/radeon/radeon_sync.c | 2 +- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- include/linux/dma-resv.h | 14 ++++++-------- 15 files changed, 26 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index ee04fb442015..d419cf90ee73 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -234,7 +234,7 @@ retry: shared_count = fobj->shared_count; else shared_count = 0; - fence_excl = rcu_dereference(resv->fence_excl); + fence_excl = dma_resv_excl_fence(resv); if (read_seqcount_retry(&resv->seq, seq)) { rcu_read_unlock(); goto retry; @@ -1382,8 +1382,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) buf_obj->name ?: ""); robj = buf_obj->resv; - fence = rcu_dereference_protected(robj->fence_excl, - dma_resv_held(robj)); + fence = dma_resv_excl_fence(robj); if (fence) seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n", fence->ops->get_driver_name(fence), diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6132ba631991..ed7b4e8f002f 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -284,7 +284,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); */ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { - struct dma_fence *old_fence = dma_resv_get_excl(obj); + struct dma_fence *old_fence = dma_resv_excl_fence(obj); struct dma_resv_list *old; u32 i = 0; @@ -380,7 +380,7 @@ retry: rcu_read_unlock(); src_list = dma_resv_get_list(dst); - old = dma_resv_get_excl(dst); + old = dma_resv_excl_fence(dst); write_seqcount_begin(&dst->seq); /* write_seqcount_begin provides the necessary memory barrier */ @@ -428,7 +428,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj, rcu_read_lock(); seq = read_seqcount_begin(&obj->seq); - fence_excl = rcu_dereference(obj->fence_excl); + fence_excl = dma_resv_excl_fence(obj); if (fence_excl && !dma_fence_get_rcu(fence_excl)) goto unlock; @@ -523,7 +523,7 @@ retry: rcu_read_lock(); i = -1; - fence = rcu_dereference(obj->fence_excl); + fence = dma_resv_excl_fence(obj); if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { if (!dma_fence_get_rcu(fence)) goto unlock_retry; @@ -645,7 +645,7 @@ retry: } if (!shared_count) { - struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = dma_resv_excl_fence(obj); if (fence_excl) { ret = dma_resv_test_signaled_single(fence_excl); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 73c76a3e2b12..7d5aaf584634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -226,7 +226,7 @@ 
static void amdgpu_gem_object_close(struct drm_gem_object *obj, if (!amdgpu_vm_ready(vm)) goto out_unlock; - fence = dma_resv_get_excl(bo->tbo.base.resv); + fence = dma_resv_excl_fence(bo->tbo.base.resv); if (fence) { amdgpu_bo_fence(bo, fence, true); fence = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 4e558632a5d2..2bdc9df5c6b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -EINVAL; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = dma_resv_excl_fence(resv); r = amdgpu_sync_fence(sync, f); flist = dma_resv_get_list(resv); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index db69f19ab5bc..2237fe5204d0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) } } - fence = rcu_dereference(robj->fence_excl); + fence = dma_resv_excl_fence(robj); if (fence) etnaviv_gem_describe_fence(fence, "Exclusive", m); rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 25235ef630c1..088d375b3395 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -113,8 +113,7 @@ retry: seq = raw_read_seqcount(&obj->base.resv->seq); /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = - busy_check_writer(rcu_dereference(obj->base.resv->fence_excl)); + args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); /* Translate shared fences to READ set of engines */ list = rcu_dereference(obj->base.resv->fence); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 56df86e5f740..a5a2a922e3e8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, fobj = dma_resv_get_list(obj->resv); if (!fobj || (fobj->shared_count == 0)) { - fence = dma_resv_get_excl(obj->resv); + fence = dma_resv_excl_fence(obj->resv); /* don't need to wait on our own fences, since ring is fifo */ if (fence && (fence->context != fctx->context)) { ret = dma_fence_wait(fence, true); @@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, } } - fence = rcu_dereference(robj->fence_excl); + fence = dma_resv_excl_fence(robj); if (fence) describe_fence(fence, "Exclusive", m); rcu_read_unlock(); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c3d20bc80022..520b1ea9d16c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -951,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; - struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); + struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv); nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index e5dcbf67de7e..19c096de5bdc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -356,7 +356,7 @@ nouveau_fence_sync(struct 
nouveau_bo *nvbo, struct nouveau_channel *chan, bool e } fobj = dma_resv_get_list(resv); - fence = dma_resv_get_excl(resv); + fence = dma_resv_excl_fence(resv); if (fence && (!exclusive || !fobj || !fobj->shared_count)) { struct nouveau_channel *prev = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 652af7a134bd..406681317419 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); + work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 5d3302945076..c8a1711325de 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev, int r = 0; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = dma_resv_excl_fence(resv); fence = f ? to_radeon_fence(f) : NULL; if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index dfa9fdbe98da..1f5b1a5c0a09 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - f = dma_resv_get_excl(bo->tbo.base.resv); + f = dma_resv_excl_fence(bo->tbo.base.resv); if (f) { r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4ed56520b81d..1752f8e523e7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) rcu_read_lock(); fobj = rcu_dereference(resv->fence); - fence = rcu_dereference(resv->fence_excl); + fence = dma_resv_excl_fence(resv); if (fence && !fence->ops->signaled) dma_fence_enable_sw_signaling(fence); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 62ea920addc3..7b45393ad98e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, if (bo->moving) dma_fence_put(bo->moving); bo->moving = dma_fence_get - (dma_resv_get_excl(bo->base.resv)); + (dma_resv_excl_fence(bo->base.resv)); } return 0; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index f32a3d176513..e3a7f740bb06 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -226,22 +226,20 @@ static inline void dma_resv_unlock(struct dma_resv *obj) } /** - * dma_resv_get_excl - get the reservation object's - * exclusive fence, with update-side lock held + * dma_resv_exclusive - return the object's exclusive fence * @obj: the reservation object * - * Returns the exclusive fence (if any). Does NOT take a - * reference. Writers must hold obj->lock, readers may only - * hold a RCU read side lock. + * Returns the exclusive fence (if any). 
Caller must either hold the objects + * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(), + * or one of the variants of each * * RETURNS * The exclusive fence or NULL */ static inline struct dma_fence * -dma_resv_get_excl(struct dma_resv *obj) +dma_resv_excl_fence(struct dma_resv *obj) { - return rcu_dereference_protected(obj->fence_excl, - dma_resv_held(obj)); + return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj)); } /** -- cgit v1.2.3 From d3fae3b3daac09961ab871a25093b0ae404282d5 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 2 Jun 2021 13:01:15 +0200 Subject: dma-buf: drop the _rcu postfix on function names v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The functions can be called both in _rcu context as well as while holding the lock. v2: add some kerneldoc as suggested by Daniel v3: fix indentation Signed-off-by: Christian König Reviewed-by: Jason Ekstrand Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 3 +-- drivers/dma-buf/dma-resv.c | 32 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 ++++---- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++-- drivers/gpu/drm/drm_gem.c | 5 ++-- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 ++--- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6 ++--- drivers/gpu/drm/i915/dma_resv_utils.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_busy.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6 ++--- drivers/gpu/drm/i915/i915_request.c | 4 +-- drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- drivers/gpu/drm/msm/msm_gem.c | 3 +-- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +-- drivers/gpu/drm/panfrost/panfrost_drv.c | 3 +-- drivers/gpu/drm/radeon/radeon_gem.c | 6 ++--- drivers/gpu/drm/radeon/radeon_mn.c | 4 +-- drivers/gpu/drm/ttm/ttm_bo.c | 18 ++++++------- drivers/gpu/drm/vgem/vgem_fence.c | 3 +-- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 ++--- include/linux/dma-resv.h | 17 ++++-------- 31 files changed, 84 insertions(+), 103 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index d419cf90ee73..511fe0d217a0 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, long ret; /* Wait on any implicit rendering fences */ - ret = dma_resv_wait_timeout_rcu(resv, write, true, - MAX_SCHEDULE_TIMEOUT); + ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); if (ret < 0) return ret; diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 62e7e055ac62..f26c71747d43 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -396,7 +396,7 @@ retry: EXPORT_SYMBOL(dma_resv_copy_fences); /** - * dma_resv_get_fences_rcu - Get an object's shared and exclusive + * dma_resv_get_fences - Get an object's shared and exclusive * fences without 
update side lock held * @obj: the reservation object * @pfence_excl: the returned exclusive fence (or NULL) @@ -408,10 +408,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences); * exclusive fence is not specified the fence is put into the array of the * shared fences as well. Returns either zero or -ENOMEM. */ -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned int *pshared_count, - struct dma_fence ***pshared) +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, + unsigned int *pshared_count, + struct dma_fence ***pshared) { struct dma_fence **shared = NULL; struct dma_fence *fence_excl; @@ -494,23 +493,24 @@ unlock: *pshared = shared; return ret; } -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); +EXPORT_SYMBOL_GPL(dma_resv_get_fences); /** - * dma_resv_wait_timeout_rcu - Wait on reservation's objects + * dma_resv_wait_timeout - Wait on reservation's objects * shared and/or exclusive fences. * @obj: the reservation object * @wait_all: if true, wait on all fences, else wait on just exclusive fence * @intr: if true, do interruptible wait * @timeout: timeout value in jiffies or zero to return immediately * + * Callers are not required to hold specific locks, but maybe hold + * dma_resv_lock() already * RETURNS * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or * greater than zer on success. */ -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, - bool wait_all, bool intr, - unsigned long timeout) +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout) { long ret = timeout ? timeout : 1; unsigned int seq, shared_count; @@ -582,7 +582,7 @@ unlock_retry: rcu_read_unlock(); goto retry; } -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) @@ -602,16 +602,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) } /** - * dma_resv_test_signaled_rcu - Test if a reservation object's - * fences have been signaled. + * dma_resv_test_signaled - Test if a reservation object's fences have been + * signaled. 
* @obj: the reservation object * @test_all: if true, test all fences, otherwise only test the exclusive * fence * + * Callers are not required to hold specific locks, but maybe hold + * dma_resv_lock() already * RETURNS * true if all fences signaled, else false */ -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { unsigned int seq, shared_count; int ret; @@ -660,7 +662,7 @@ retry: rcu_read_unlock(); return ret; } -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); +EXPORT_SYMBOL_GPL(dma_resv_test_signaled); #if IS_ENABLED(CONFIG_LOCKDEP) static int __init dma_resv_lockdep(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 49f73b5b89b0..ac7b37dfff5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, - &work->shared_count, - &work->shared); + r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); goto unpin; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 04caa31056d0..c3053b83b80c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj) if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ return 0; - r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); + r = dma_resv_get_fences(obj, NULL, &count, &fences); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7d5aaf584634..1c3e3b608332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, - timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index b4971e90b98c..df69b1e9e451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned count; int r; - r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); + r = dma_resv_get_fences(resv, NULL, &count, &fences); if (r) goto fallback; @@ -156,8 +156,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout_rcu(resv, true, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 2741c28ff1b5..d6c54c7f7679 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 19c1384a133f..96447e1d4c9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -756,8 +756,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 82f0542c7792..a692a4570627 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1126,9 +1126,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, - msecs_to_jiffies(10)); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; if (r < 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bcfd4a8d0288..d1a229212e7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2022,13 +2022,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) unsigned i, shared_count; int r; - r = dma_resv_get_fences_rcu(resv, &excl, - &shared_count, &shared); + r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); if (r) { /* Not enough memory to grab the fence list, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout_rcu(resv, true, false, + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); return; } @@ -2640,7 +2639,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) return false; /* Try to block ongoing updates */ @@ -2820,8 +2819,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true, + true, timeout); if (timeout <= 0) return timeout; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3267eb2e35dd..6dde2873d47b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8400,9 +8400,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * deadlock during GPU reset when this fence will not signal * but we hold reservation lock for the BO. */ - r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, - false, - msecs_to_jiffies(5000)); + r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, + msecs_to_jiffies(5000)); if (unlikely(r <= 0)) DRM_ERROR("Waiting for fences timed out!"); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 263b4fb03303..d62fb1a3c916 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, return -EINVAL; } - ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, - true, timeout); + ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); if (ret == 0) ret = -ETIME; else if (ret > 0) @@ -1380,7 +1379,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, return drm_gem_fence_array_add(fence_array, fence); } - ret = dma_resv_get_fences_rcu(obj->resv, NULL, + ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences); if (ret || !fence_count) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8792d8dd5106..b8fa6ed3dd73 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!dma_resv_test_signaled_rcu(obj->resv, - write)) + if (!dma_resv_test_signaled(obj->resv, write)) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = dma_resv_wait_timeout_rcu(obj->resv, - write, true, remain); + ret = dma_resv_wait_timeout(obj->resv, write, true, remain); if (ret <= 0) return ret == 0 ? 
-ETIMEDOUT : ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index c942d2a8c252..d53856d7a747 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -189,9 +189,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) continue; if (bo->flags & ETNA_SUBMIT_BO_WRITE) { - ret = dma_resv_get_fences_rcu(robj, &bo->excl, - &bo->nr_shared, - &bo->shared); + ret = dma_resv_get_fences(robj, &bo->excl, + &bo->nr_shared, + &bo->shared); if (ret) return ret; } else { diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c index 9e508e7d4629..7df91b7e4ca8 100644 --- a/drivers/gpu/drm/i915/dma_resv_utils.c +++ b/drivers/gpu/drm/i915/dma_resv_utils.c @@ -10,7 +10,7 @@ void dma_resv_prune(struct dma_resv *resv) { if (dma_resv_trylock(resv)) { - if (dma_resv_test_signaled_rcu(resv, true)) + if (dma_resv_test_signaled(resv, true)) dma_resv_add_excl_fence(resv, NULL); dma_resv_unlock(resv); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 35279dd561f5..6234e17259c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * Alternatively, we can trade that extra information on read/write * activity with * args->busy = - * !dma_resv_test_signaled_rcu(obj->resv, true); + * !dma_resv_test_signaled(obj->resv, true); * to report the overall busyness. This is what the wait-ioctl does. * */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 297143511f99..66789111a24b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma) if (DBG_FORCE_RELOC) return false; - return !dma_resv_test_signaled_rcu(vma->resv, true); + return !dma_resv_test_signaled(vma->resv, true); } static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index a657b99ec760..b5cbbe659a77 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, return true; /* we will unbind on next submission, still have userptr pins */ - r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(obj->base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); if (r <= 0) drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index c13aeddf5aa7..1e97520c62b2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int count, i; int ret; - ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); + ret = dma_resv_get_fences(resv, &excl, &count, &shared); if (ret) return ret; @@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int count, i; int ret; - ret = dma_resv_get_fences_rcu(obj->base.resv, - &excl, &count, &shared); + ret = dma_resv_get_fences(obj->base.resv, &excl, &count, 
+ &shared); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c85494f411f4..6cb91f042642 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to, struct dma_fence **shared; unsigned int count, i; - ret = dma_resv_get_fences_rcu(obj->base.resv, - &excl, &count, &shared); + ret = dma_resv_get_fences(obj->base.resv, &excl, &count, + &shared); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 7aaf74552d06..c589a681da77 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct dma_fence **shared; unsigned int count, i; - ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); + ret = dma_resv_get_fences(resv, &excl, &count, &shared); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 410a93a7e77f..a94a43de95ef 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); long ret; - ret = dma_resv_wait_timeout_rcu(obj->resv, write, - true, remain); + ret = dma_resv_wait_timeout(obj->resv, write, true, remain); if (ret == 0) return remain == 0 ? -EBUSY : -ETIMEDOUT; else if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index d863e5ed954a..5b27845075a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -964,8 +964,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, return -ENOENT; nvbo = nouveau_gem_object(gem); - lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, - no_wait ? 0 : 30 * HZ); + lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true, + no_wait ? 0 : 30 * HZ); if (!lret) ret = -EBUSY; else if (lret > 0) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1596559f3d14..075ec0ef746c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -312,8 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, if (!gem_obj) return -ENOENT; - ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, - true, timeout); + ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout); if (!ret) ret = timeout ? 
-ETIMEDOUT : -EBUSY; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3272c33af8fe..458f92a70887 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, } if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access wait for object idle */ - r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); + r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); if (!r) r = -EBUSY; @@ -523,7 +523,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); + r = dma_resv_test_signaled(robj->tbo.base.resv, true); if (r == 0) r = -EBUSY; else @@ -552,7 +552,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } robj = gem_to_radeon_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); if (ret == 0) r = -EBUSY; else if (ret < 0) diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index e37c9a57a7c3..9fa88549c89e 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, return true; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f04a269b7065..7e7284da5630 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -296,7 +296,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, struct dma_resv *resv = &bo->base._resv; int ret; - if (dma_resv_test_signaled_rcu(resv, true)) + if (dma_resv_test_signaled(resv, true)) ret = 0; else ret = -EBUSY; @@ -308,8 +308,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, dma_resv_unlock(bo->base.resv); spin_unlock(&bo->bdev->lru_lock); - lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, - 30 * HZ); + lret = dma_resv_wait_timeout(resv, true, interruptible, + 30 * HZ); if (lret < 0) return lret; @@ -411,8 +411,8 @@ static void ttm_bo_release(struct kref *kref) /* Last resort, if we fail to allocate memory for the * fences block for the BO to become idle */ - dma_resv_wait_timeout_rcu(bo->base.resv, true, false, - 30 * HZ); + dma_resv_wait_timeout(bo->base.resv, true, false, + 30 * HZ); } if (bo->bdev->funcs->release_notify) @@ -422,7 +422,7 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_free(bdev, bo->resource); } - if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || + if (!dma_resv_test_signaled(bo->base.resv, true) || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); @@ -1094,14 +1094,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, long timeout = 15 * HZ; if (no_wait) { - if (dma_resv_test_signaled_rcu(bo->base.resv, true)) + if (dma_resv_test_signaled(bo->base.resv, true)) return 0; else return -EBUSY; } - timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, - interruptible, timeout); + timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible, + timeout); if (timeout < 0) return 
timeout; diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 2902dc6e64fa..bd6f75285fd9 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, /* Check for a conflicting fence */ resv = obj->resv; - if (!dma_resv_test_signaled_rcu(resv, - arg->flags & VGEM_FENCE_WRITE)) { + if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) { ret = -EBUSY; goto err_fence; } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 669f2ee39515..5c1ad1596889 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -451,10 +451,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, return -ENOENT; if (args->flags & VIRTGPU_WAIT_NOWAIT) { - ret = dma_resv_test_signaled_rcu(obj->resv, true); + ret = dma_resv_test_signaled(obj->resv, true); } else { - ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, - timeout); + ret = dma_resv_wait_timeout(obj->resv, true, true, timeout); } if (ret == 0) ret = -EBUSY; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 176b6201ef2b..362f56d5b12b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -743,9 +743,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, if (flags & drm_vmw_synccpu_allow_cs) { long lret; - lret = dma_resv_wait_timeout_rcu - (bo->base.resv, true, true, - nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); + lret = dma_resv_wait_timeout(bo->base.resv, true, true, + nonblock ? 0 : + MAX_SCHEDULE_TIMEOUT); if (!lret) return -EBUSY; else if (lret < 0) diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 3e0eefcead44..562b885cf9c3 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -271,19 +271,12 @@ void dma_resv_init(struct dma_resv *obj); void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); - void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); - -int dma_resv_get_fences_rcu(struct dma_resv *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); - +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, + unsigned *pshared_count, struct dma_fence ***pshared); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); - -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, - unsigned long timeout); - -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); #endif /* _LINUX_RESERVATION_H */ -- cgit v1.2.3 From 391629bdfcb9014e8bcd1be216b59854877e70ed Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Tue, 15 Jun 2021 17:23:11 -0400 Subject: drm/amdgpu: remove amdgpu_vm_pt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Page table entries are now embedded in VM BO, so we do not need struct amdgpu_vm_pt. This patch replaces struct amdgpu_vm_pt with struct amdgpu_vm_bo_base.
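[ Editor's note: this is the usual kernel refactoring of deleting a wrapper
struct once it carries no state of its own: code that had to container_of()
its way back to the wrapper can then store and pass the base type directly,
and the per-object children move into a flexible array member. A reduced
sketch with hypothetical names (not the amdgpu definitions):

	#include <linux/kernel.h>

	struct base {
		struct base *parent;
	};

	/* Before: an extra type whose only job is to wrap the base. */
	struct wrapper {
		struct base b;
	};

	static struct wrapper *to_wrapper(struct base *p)
	{
		return container_of(p, struct wrapper, b); /* extra hop */
	}

	/* After: the wrapper is gone; children are carried by the owning
	 * object as a flexible array, sized once at allocation time. */
	struct owner {
		unsigned int nr_entries;
		struct base entries[];
	};

The effect is visible in amdgpu_vm_pt_parent() below: parent->vm_bo already
has the right type after the change, so the container_of() call disappears. ]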
Signed-off-by: Nirmoy Das Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 26 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 164 +++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 9 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- 12 files changed, 105 insertions(+), 126 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 3844d1b5b5f1..3b8e1ee8c475 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -356,7 +356,7 @@ static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) */ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; @@ -372,7 +372,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) return ret; } - vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); if (vm->use_cpu_for_update) { ret = amdgpu_bo_kmap(pd, NULL); @@ -387,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; @@ -1153,7 +1153,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info, list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *pd = peer_vm->root.base.bo; + struct amdgpu_bo *pd = peer_vm->root.bo; ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, AMDGPU_SYNC_NE_OWNER, @@ -1220,7 +1220,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, vm->process_info = *process_info; /* Validate page directory and attach eviction fence */ - ret = amdgpu_bo_reserve(vm->root.base.bo, true); + ret = amdgpu_bo_reserve(vm->root.bo, true); if (ret) goto reserve_pd_fail; ret = vm_validate_pt_pd_bos(vm); @@ -1228,16 +1228,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, pr_err("validate_pt_pd_bos() failed\n"); goto validate_pd_fail; } - ret = amdgpu_bo_sync_wait(vm->root.base.bo, + ret = amdgpu_bo_sync_wait(vm->root.bo, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; - amdgpu_bo_fence(vm->root.base.bo, + amdgpu_bo_fence(vm->root.bo, &vm->process_info->eviction_fence->base, true); - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); /* Update process info */ mutex_lock(&vm->process_info->lock); @@ -1251,7 +1251,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, reserve_shared_fail: wait_pd_fail: validate_pd_fail: - amdgpu_bo_unreserve(vm->root.base.bo); + 
amdgpu_bo_unreserve(vm->root.bo); reserve_pd_fail: vm->process_info = NULL; if (info) { @@ -1306,7 +1306,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdkfd_process_info *process_info = vm->process_info; - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; if (!process_info) return; @@ -1362,7 +1362,7 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); - struct amdgpu_bo *pd = avm->root.base.bo; + struct amdgpu_bo *pd = avm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); if (adev->asic_type < CHIP_VEGA10) @@ -2389,7 +2389,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *bo = peer_vm->root.base.bo; + struct amdgpu_bo *bo = peer_vm->root.bo; amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1476236f5c7c..76fe5b71e35d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -832,7 +832,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a9bbb0034e1e..536005bff24a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1304,11 +1304,11 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) seq_printf(m, "pid:%d\tProcess:%s ----------\n", vm->task_info.pid, vm->task_info.process_name); - r = amdgpu_bo_reserve(vm->root.base.bo, true); + r = amdgpu_bo_reserve(vm->root.bo, true); if (r) break; amdgpu_debugfs_vm_bo_info(vm, m); - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); } mutex_unlock(&dev->filelist_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index c3053b83b80c..a3daaa89330c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -448,7 +448,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; if (ticket) { /* When we get an error here it means that somebody diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index dbebbe16e3b3..d94c5419ec25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -69,13 +69,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f) dev = PCI_SLOT(adev->pdev->devfn); fn = PCI_FUNC(adev->pdev->devfn); - ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false); + ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false); if (ret) { DRM_ERROR("Fail to reserve bo\n"); return; } amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, 
&gtt_mem, &cpu_mem); - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); + amdgpu_bo_unreserve(fpriv->vm.root.bo); seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus, dev, fn, fpriv->vm.pasid); seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9cf4beaf646c..b3404c43a911 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -170,7 +170,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, return -EPERM; if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && - abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + abo->tbo.base.resv != vm->root.bo->tbo.base.resv) return -EPERM; r = amdgpu_bo_reserve(abo, false); @@ -320,11 +320,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, } if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { - r = amdgpu_bo_reserve(vm->root.base.bo, false); + r = amdgpu_bo_reserve(vm->root.bo, false); if (r) return r; - resv = vm->root.base.bo->tbo.base.resv; + resv = vm->root.bo->tbo.base.resv; } initial_domain = (u32)(0xffffffff & args->in.domains); @@ -353,9 +353,9 @@ retry: if (!r) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - abo->parent = amdgpu_bo_ref(vm->root.base.bo); + abo->parent = amdgpu_bo_ref(vm->root.bo); } - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); } if (r) return r; @@ -841,7 +841,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, } for (base = robj->vm_bo; base; base = base->next) if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), - amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { + amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { r = -EINVAL; amdgpu_bo_unreserve(robj); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 617fcbafc75d..96ef3f1051d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1262,7 +1262,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, } pasid = fpriv->vm.pasid; - pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); + pd = amdgpu_bo_ref(fpriv->vm.root.bo); amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); amdgpu_vm_fini(adev, &fpriv->vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 126df03a7066..2cbc1d023f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -126,7 +126,7 @@ struct amdgpu_bo_user { struct amdgpu_bo_vm { struct amdgpu_bo bo; struct amdgpu_bo *shadow; - struct amdgpu_vm_pt entries[]; + struct amdgpu_vm_bo_base entries[]; }; static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 18246b5b6ee3..750cdf52d525 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -332,7 +332,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) return; vm->bulk_moveable = false; @@ -361,14 +361,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * Helper to get the parent entry for the child page table. NULL if we are at * the root page directory.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 18246b5b6ee3..750cdf52d525 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -332,7 +332,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
 
-	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
+	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 		return;
 
 	vm->bulk_moveable = false;
@@ -361,14 +361,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
  * Helper to get the parent entry for the child page table. NULL if we are at
  * the root page directory.
  */
-static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
 {
-	struct amdgpu_bo *parent = pt->base.bo->parent;
+	struct amdgpu_bo *parent = pt->bo->parent;
 
 	if (!parent)
 		return NULL;
 
-	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
+	return parent->vm_bo;
 }
 
 /*
@@ -376,8 +376,8 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
  */
 struct amdgpu_vm_pt_cursor {
 	uint64_t pfn;
-	struct amdgpu_vm_pt *parent;
-	struct amdgpu_vm_pt *entry;
+	struct amdgpu_vm_bo_base *parent;
+	struct amdgpu_vm_bo_base *entry;
 	unsigned level;
 };
 
@@ -416,17 +416,17 @@ static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
 {
 	unsigned mask, shift, idx;
 
-	if (!cursor->entry->entries)
+	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
+	    !cursor->entry->bo)
 		return false;
 
-	BUG_ON(!cursor->entry->base.bo);
 	mask = amdgpu_vm_entries_mask(adev, cursor->level);
 	shift = amdgpu_vm_level_shift(adev, cursor->level);
 	++cursor->level;
 
 	idx = (cursor->pfn >> shift) & mask;
 	cursor->parent = cursor->entry;
-	cursor->entry = &cursor->entry->entries[idx];
+	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
 	return true;
 }
 
@@ -453,7 +453,7 @@ static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
 	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
 	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
 
-	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
+	if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
 		return false;
 
 	cursor->pfn += 1ULL << shift;
@@ -539,7 +539,7 @@ static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
  * True when the search should continue, false otherwise.
  */
 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
-				      struct amdgpu_vm_pt *entry)
+				      struct amdgpu_vm_bo_base *entry)
 {
 	return entry && (!start || entry != start->entry);
 }
@@ -590,7 +590,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct amdgpu_bo_list_entry *entry)
 {
 	entry->priority = 0;
-	entry->tv.bo = &vm->root.base.bo->tbo;
+	entry->tv.bo = &vm->root.bo->tbo;
 	/* Two for VM updates, one for TTM and one for the CS job */
 	entry->tv.num_shared = 4;
 	entry->user_pages = NULL;
@@ -622,7 +622,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+		if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 	}
 
@@ -781,11 +781,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		entries -= ats_entries;
 
 	} else {
-		struct amdgpu_vm_pt *pt;
+		struct amdgpu_vm_bo_base *pt;
 
-		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
+		pt = ancestor->vm_bo;
 		ats_entries = amdgpu_vm_num_ats_entries(adev);
-		if ((pt - vm->root.entries) >= ats_entries) {
+		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
 			ats_entries = 0;
 		} else {
 			ats_entries = entries;
@@ -902,8 +902,8 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	bp.type = ttm_bo_type_kernel;
 	bp.no_wait_gpu = immediate;
-	if (vm->root.base.bo)
-		bp.resv = vm->root.base.bo->tbo.base.resv;
+	if (vm->root.bo)
+		bp.resv = vm->root.bo->tbo.base.resv;
 
 	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
 	if (r)
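Aside: with the wrapper gone, the cursor hunks above walk the tree through two pointers that already exist: amdgpu_bo::vm_bo links a page-table BO back to its amdgpu_vm_bo_base, and the child array is reached through the BO's trailing entries[]. A sketch of both directions, using stand-in types and hypothetical helper names (pt_parent/pt_child are mine, mirroring the kernel's amdgpu_vm_pt_parent() and the descendant step):

	struct amdgpu_vm_bo_base;

	struct amdgpu_bo {
		struct amdgpu_bo *parent;		/* BO of the parent directory */
		struct amdgpu_vm_bo_base *vm_bo;	/* back-pointer to the VM entry */
	};

	struct amdgpu_vm_bo_base {
		struct amdgpu_bo *bo;
	};

	struct amdgpu_bo_vm {
		struct amdgpu_bo bo;
		struct amdgpu_vm_bo_base entries[];
	};

	/* A plain cast works here because bo is the first member. */
	#define to_amdgpu_bo_vm(abo) ((struct amdgpu_bo_vm *)(abo))

	/* Up one level: no container_of() detour needed anymore. */
	static struct amdgpu_vm_bo_base *pt_parent(struct amdgpu_vm_bo_base *pt)
	{
		struct amdgpu_bo *parent = pt->bo->parent;

		return parent ? parent->vm_bo : NULL;
	}

	/* Down one level: index the child array embedded in the entry's BO. */
	static struct amdgpu_vm_bo_base *pt_child(struct amdgpu_vm_bo_base *pt,
						  unsigned int idx)
	{
		return &to_amdgpu_bo_vm(pt->bo)->entries[idx];
	}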
@@ -962,19 +962,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			       struct amdgpu_vm_pt_cursor *cursor,
 			       bool immediate)
 {
-	struct amdgpu_vm_pt *entry = cursor->entry;
+	struct amdgpu_vm_bo_base *entry = cursor->entry;
 	struct amdgpu_bo *pt_bo;
 	struct amdgpu_bo_vm *pt;
 	int r;
 
-	if (entry->base.bo) {
-		if (cursor->level < AMDGPU_VM_PTB)
-			entry->entries =
-				to_amdgpu_bo_vm(entry->base.bo)->entries;
-		else
-			entry->entries = NULL;
+	if (entry->bo)
 		return 0;
-	}
 
 	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
 	if (r)
@@ -984,13 +978,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	 * freeing them up in the wrong order.
 	 */
 	pt_bo = &pt->bo;
-	pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
-	amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
-	if (cursor->level < AMDGPU_VM_PTB)
-		entry->entries = pt->entries;
-	else
-		entry->entries = NULL;
-
+	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
+	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
 	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
 	if (r)
 		goto error_free_pt;
@@ -1008,18 +997,17 @@ error_free_pt:
  *
  * @entry: PDE to free
  */
-static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
+static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
 {
 	struct amdgpu_bo *shadow;
 
-	if (entry->base.bo) {
-		shadow = amdgpu_bo_shadowed(entry->base.bo);
-		entry->base.bo->vm_bo = NULL;
-		list_del(&entry->base.vm_status);
-		amdgpu_bo_unref(&shadow);
-		amdgpu_bo_unref(&entry->base.bo);
-	}
-	entry->entries = NULL;
+	if (!entry->bo)
+		return;
+	shadow = amdgpu_bo_shadowed(entry->bo);
+	entry->bo->vm_bo = NULL;
+	list_del(&entry->vm_status);
+	amdgpu_bo_unref(&shadow);
+	amdgpu_bo_unref(&entry->bo);
 }
 
 /**
@@ -1036,7 +1024,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
 				struct amdgpu_vm_pt_cursor *start)
 {
 	struct amdgpu_vm_pt_cursor cursor;
-	struct amdgpu_vm_pt *entry;
+	struct amdgpu_vm_bo_base *entry;
 
 	vm->bulk_moveable = false;
@@ -1304,10 +1292,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
  */
 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
 				struct amdgpu_vm *vm,
-				struct amdgpu_vm_pt *entry)
+				struct amdgpu_vm_bo_base *entry)
 {
-	struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
-	struct amdgpu_bo *bo = parent->base.bo, *pbo;
+	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+	struct amdgpu_bo *bo = parent->bo, *pbo;
 	uint64_t pde, pt, flags;
 	unsigned level;
 
@@ -1315,8 +1303,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
 		pbo = pbo->parent;
 
 	level += params->adev->vm_manager.root_level;
-	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
-	pde = (entry - parent->entries) * 8;
+	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
+	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
 	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
 					1, 0, flags);
 }
@@ -1333,11 +1321,11 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
 				     struct amdgpu_vm *vm)
 {
 	struct amdgpu_vm_pt_cursor cursor;
-	struct amdgpu_vm_pt *entry;
+	struct amdgpu_vm_bo_base *entry;
 
 	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
-		if (entry->base.bo && !entry->base.moved)
-			amdgpu_vm_bo_relocated(&entry->base);
+		if (entry->bo && !entry->moved)
+			amdgpu_vm_bo_relocated(entry);
 }
 
 /**
@@ -1371,11 +1359,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 		return r;
 
 	while (!list_empty(&vm->relocated)) {
-		struct amdgpu_vm_pt *entry;
+		struct amdgpu_vm_bo_base *entry;
 
-		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
-					 base.vm_status);
-		amdgpu_vm_bo_idle(&entry->base);
+		entry = list_first_entry(&vm->relocated,
+					 struct amdgpu_vm_bo_base,
+					 vm_status);
+		amdgpu_vm_bo_idle(entry);
 
 		r = amdgpu_vm_update_pde(&params, vm, entry);
 		if (r)
@@ -1555,7 +1544,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 			continue;
 		}
 
-		pt = cursor.entry->base.bo;
+		pt = cursor.entry->bo;
 		if (!pt) {
 			/* We need all PDs and PTs for mapping something, */
 			if (flags & AMDGPU_PTE_VALID)
@@ -1567,7 +1556,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 			if (!amdgpu_vm_pt_ancestor(&cursor))
 				return -EINVAL;
 
-			pt = cursor.entry->base.bo;
+			pt = cursor.entry->bo;
 			shift = parent_shift;
 			frag_end = max(frag_end, ALIGN(frag_start + 1,
 				   1ULL << shift));
@@ -1622,7 +1611,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 		 */
 		while (cursor.pfn < frag_start) {
 			/* Make sure previous mapping is freed */
-			if (cursor.entry->base.bo) {
+			if (cursor.entry->bo) {
 				params->table_freed = true;
 				amdgpu_vm_free_pts(adev, params->vm, &cursor);
 			}
@@ -1704,7 +1693,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
 		struct dma_fence *tmp = dma_fence_get_stub();
 
-		amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
 		swap(vm->last_unlocked, tmp);
 		dma_fence_put(tmp);
 	}
@@ -1850,7 +1839,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 
 	if (clear || !bo) {
 		mem = NULL;
-		resv = vm->root.base.bo->tbo.base.resv;
+		resv = vm->root.bo->tbo.base.resv;
 	} else {
 		struct drm_gem_object *obj = &bo->tbo.base;
@@ -1881,7 +1870,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	}
 
 	if (clear || (bo && bo->tbo.base.resv ==
-		      vm->root.base.bo->tbo.base.resv))
+		      vm->root.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1923,7 +1912,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
+	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
 		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
@@ -2060,7 +2049,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
@@ -2106,7 +2095,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence)
 {
-	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 	struct amdgpu_bo_va_mapping *mapping;
 	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
@@ -2265,7 +2254,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
 
-	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
+	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
 	    !bo_va->base.moved) {
 		list_move(&bo_va->base.vm_status, &vm->moved);
 	}
@@ -2627,7 +2616,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base **base;
 
 	if (bo) {
-		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 
 		for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2721,7 +2710,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
+		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
 			amdgpu_vm_bo_evicted(bo_base);
 			continue;
 		}
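Aside: the amdgpu_vm_update_pde() hunk above keeps the same PDE math under the new types: subtracting the entries[] base pointer from the entry pointer yields the entry's index in the parent directory, and each page-directory entry is 8 bytes, so index * 8 is the byte offset the update writes. A self-contained sketch of that arithmetic (type and function names here are mine):

	#include <stdint.h>
	#include <stdio.h>

	struct vm_bo_base { void *bo; };

	/* Byte offset of a child entry inside its parent page directory. */
	static uint64_t pde_offset(const struct vm_bo_base *entry,
				   const struct vm_bo_base *entries)
	{
		return (uint64_t)(entry - entries) * 8;	/* 8 bytes per PDE */
	}

	int main(void)
	{
		struct vm_bo_base dir[512] = {{0}};

		/* Entry 3 of the directory lives at byte offset 24. */
		printf("%llu\n", (unsigned long long)pde_offset(&dir[3], dir));
		return 0;
	}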
@@ -2732,7 +2721,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 		if (bo->tbo.type == ttm_bo_type_kernel)
 			amdgpu_vm_bo_relocated(bo_base);
-		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
+		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
 			amdgpu_vm_bo_moved(bo_base);
 		else
 			amdgpu_vm_bo_invalidated(bo_base);
@@ -2862,7 +2851,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
 					true, timeout);
 	if (timeout <= 0)
 		return timeout;
@@ -2948,13 +2937,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	if (r)
 		goto error_unreserve;
 
-	amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
+	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
 
 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
 	if (r)
 		goto error_unreserve;
 
-	amdgpu_bo_unreserve(vm->root.base.bo);
+	amdgpu_bo_unreserve(vm->root.bo);
 
 	if (pasid) {
 		unsigned long flags;
@@ -2974,12 +2963,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	return 0;
 
 error_unreserve:
-	amdgpu_bo_unreserve(vm->root.base.bo);
+	amdgpu_bo_unreserve(vm->root.bo);
 
 error_free_root:
 	amdgpu_bo_unref(&root->shadow);
 	amdgpu_bo_unref(&root_bo);
-	vm->root.base.bo = NULL;
+	vm->root.bo = NULL;
 
 error_free_delayed:
 	dma_fence_put(vm->last_unlocked);
@@ -3005,17 +2994,14 @@ error_free_immediate:
  * 0 if this VM is clean
  */
 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm)
+					  struct amdgpu_vm *vm)
 {
 	enum amdgpu_vm_level root = adev->vm_manager.root_level;
 	unsigned int entries = amdgpu_vm_num_entries(adev, root);
 	unsigned int i = 0;
 
-	if (!(vm->root.entries))
-		return 0;
-
 	for (i = 0; i < entries; i++) {
-		if (vm->root.entries[i].base.bo)
+		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
 			return -EINVAL;
 	}
 
@@ -3049,7 +3035,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
 	int r;
 
-	r = amdgpu_bo_reserve(vm->root.base.bo, true);
+	r = amdgpu_bo_reserve(vm->root.bo, true);
 	if (r)
 		return r;
 
@@ -3077,7 +3063,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (pte_support_ats != vm->pte_support_ats) {
 		vm->pte_support_ats = pte_support_ats;
 		r = amdgpu_vm_clear_bo(adev, vm,
-				       to_amdgpu_bo_vm(vm->root.base.bo),
+				       to_amdgpu_bo_vm(vm->root.bo),
 				       false);
 		if (r)
 			goto free_idr;
@@ -3094,7 +3080,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	if (vm->use_cpu_for_update) {
 		/* Sync with last SDMA update/clear before switching to CPU */
-		r = amdgpu_bo_sync_wait(vm->root.base.bo,
+		r = amdgpu_bo_sync_wait(vm->root.bo,
 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
 		if (r)
 			goto free_idr;
@@ -3122,7 +3108,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	/* Free the shadow bo for compute VM */
-	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
+	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
 
 	if (pasid)
 		vm->pasid = pasid;
@@ -3138,7 +3124,7 @@ free_idr:
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 unreserve_bo:
-	amdgpu_bo_unreserve(vm->root.base.bo);
+	amdgpu_bo_unreserve(vm->root.bo);
 	return r;
 }
 
@@ -3181,7 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
 
-	root = amdgpu_bo_ref(vm->root.base.bo);
+	root = amdgpu_bo_ref(vm->root.bo);
 	amdgpu_bo_reserve(root, true);
 	if (vm->pasid) {
 		unsigned long flags;
@@ -3208,7 +3194,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_vm_free_pts(adev, vm, NULL);
 	amdgpu_bo_unreserve(root);
 	amdgpu_bo_unref(&root);
-	WARN_ON(vm->root.base.bo);
+	WARN_ON(vm->root.bo);
 
 	drm_sched_entity_destroy(&vm->immediate);
 	drm_sched_entity_destroy(&vm->delayed);
@@ -3325,7 +3311,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		/* Wait vm idle to make sure the vmid set in SPM_VMID is
 		 * not referenced anymore.
 		 */
-		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+		r = amdgpu_bo_reserve(fpriv->vm.root.bo, true);
 		if (r)
 			return r;
 
@@ -3333,7 +3319,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (r < 0)
 			return r;
 
-		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+		amdgpu_bo_unreserve(fpriv->vm.root.bo);
 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
 		break;
 	default:
@@ -3406,7 +3392,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	spin_lock(&adev->vm_manager.pasid_lock);
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 	if (vm) {
-		root = amdgpu_bo_ref(vm->root.base.bo);
+		root = amdgpu_bo_ref(vm->root.bo);
 		is_compute_context = vm->is_compute_context;
 	} else {
 		root = NULL;
@@ -3431,7 +3417,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	/* Double check that the VM still exists */
 	spin_lock(&adev->vm_manager.pasid_lock);
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	if (vm && vm->root.base.bo != root)
+	if (vm && vm->root.bo != root)
 		vm = NULL;
 	spin_unlock(&adev->vm_manager.pasid_lock);
 	if (!vm)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 1f089da1e615..ddb85a85cbba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -152,13 +152,6 @@ struct amdgpu_vm_bo_base {
 	bool				moved;
 };
 
-struct amdgpu_vm_pt {
-	struct amdgpu_vm_bo_base	base;
-
-	/* array of page tables, one for each directory entry */
-	struct amdgpu_vm_pt		*entries;
-};
-
 /* provided by hw blocks that can write ptes, e.g., sdma */
 struct amdgpu_vm_pte_funcs {
 	/* number of dw to reserve per operation */
@@ -284,7 +277,7 @@ struct amdgpu_vm {
 	struct list_head	done;
 
 	/* contains the page directory */
-	struct amdgpu_vm_pt	root;
+	struct amdgpu_vm_bo_base root;
 	struct dma_fence	*last_update;
 
 	/* Scheduler entities for page table updates */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 422958152c2b..dbb551762805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -112,7 +112,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 		swap(p->vm->last_unlocked, f);
 		dma_fence_put(tmp);
 	} else {
-		amdgpu_bo_fence(p->vm->root.base.bo, f, true);
+		amdgpu_bo_fence(p->vm->root.bo, f, true);
 	}
 
 	if (fence && !p->immediate)
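Aside: amdgpu_vm_check_clean_reserved() above shows the same simplification from a caller's side: there is no longer a separate vm->root.entries pointer to NULL-check before scanning, because the child array is always reachable through the root BO. A minimal sketch of the resulting scan, with stand-in types (root_is_clean is a hypothetical name):

	#include <stdbool.h>
	#include <stddef.h>

	struct amdgpu_bo;
	struct vm_bo_base { struct amdgpu_bo *bo; };

	/* A root directory is clean when none of its entries still owns a BO. */
	static bool root_is_clean(const struct vm_bo_base *entries, size_t num)
	{
		size_t i;

		for (i = 0; i < num; i++)
			if (entries[i].bo)
				return false;
		return true;
	}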
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 451e2ff5c062..dff1011dd7ee 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1273,7 +1273,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
 		vm = drm_priv_to_vm(pdd->drm_priv);
-		ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo;
+		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
 		ctx->tv[gpuidx].num_shared = 4;
 		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
 	}
-- 
cgit v1.2.3