| From 3fcbbb65562398020956ca46311f38db530a2ea3 Mon Sep 17 00:00:00 2001 |
| From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com> |
| Date: Wed, 2 Jun 2021 13:01:15 +0200 |
| Subject: [PATCH] BACKPORT: dma-buf: drop the _rcu postfix on function names v3 |
| MIME-Version: 1.0 |
| Content-Type: text/plain; charset=UTF-8 |
| Content-Transfer-Encoding: 8bit |
| |
| The functions can be called both in _rcu context as well |
| as while holding the lock. |
| |
| v2: add some kerneldoc as suggested by Daniel |
| v3: fix indentation |
| |
| Signed-off-by: Christian König <christian.koenig@amd.com> |
| Reviewed-by: Jason Ekstrand <jason@jlekstrand.net> |
| Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> |
| Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com |
| (cherry picked from commit d3fae3b3daac09961ab871a25093b0ae404282d5) |
| Signed-off-by: Sean Paul <seanpaul@chromium.org> |
| |
| Downstream changes: |
| -Added kbase changes |
| |
| Change-Id: Ib2285b659cc3ea1e650688f217f0341bb33b5ec8 |
| --- |
| drivers/dma-buf/dma-buf.c | 3 +- |
| drivers/dma-buf/dma-resv.c | 32 ++++++++++--------- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 ++- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 +- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5 ++- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4 +-- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++- |
| drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 +++---- |
| .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++- |
| drivers/gpu/drm/drm_gem.c | 5 ++- |
| drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6 ++-- |
| drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6 ++-- |
| drivers/gpu/drm/i915/dma_resv_utils.c | 2 +- |
| drivers/gpu/drm/i915/gem/i915_gem_busy.c | 2 +- |
| .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- |
| drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +-- |
| drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6 ++-- |
| drivers/gpu/drm/i915/i915_request.c | 4 +-- |
| drivers/gpu/drm/i915/i915_sw_fence.c | 2 +- |
| drivers/gpu/drm/msm/msm_gem.c | 3 +- |
| drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +-- |
| drivers/gpu/drm/panfrost/panfrost_drv.c | 3 +- |
| drivers/gpu/drm/radeon/radeon_gem.c | 6 ++-- |
| drivers/gpu/drm/radeon/radeon_mn.c | 4 +-- |
| drivers/gpu/drm/ttm/ttm_bo.c | 18 +++++------ |
| drivers/gpu/drm/vgem/vgem_fence.c | 3 +- |
| drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5 ++- |
| drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 ++-- |
| include/linux/dma-resv.h | 17 +++------- |
| 31 files changed, 84 insertions(+), 103 deletions(-) |
| |
| diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c |
| index eadd1eaa2fb5..ceaf4ebfc275 100644 |
| --- a/drivers/dma-buf/dma-buf.c |
| +++ b/drivers/dma-buf/dma-buf.c |
| @@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
| long ret; |
| |
| /* Wait on any implicit rendering fences */ |
| - ret = dma_resv_wait_timeout_rcu(resv, write, true, |
| - MAX_SCHEDULE_TIMEOUT); |
| + ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); |
| if (ret < 0) |
| return ret; |
| |
| diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c |
| index 87f5d82d992a..646bb2dd5f3b 100644 |
| --- a/drivers/dma-buf/dma-resv.c |
| +++ b/drivers/dma-buf/dma-resv.c |
| @@ -375,7 +375,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) |
| EXPORT_SYMBOL(dma_resv_copy_fences); |
| |
| /** |
| - * dma_resv_get_fences_rcu - Get an object's shared and exclusive |
| + * dma_resv_get_fences - Get an object's shared and exclusive |
| * fences without update side lock held |
| * @obj: the reservation object |
| * @pfence_excl: the returned exclusive fence (or NULL) |
| @@ -387,10 +387,9 @@ EXPORT_SYMBOL(dma_resv_copy_fences); |
| * exclusive fence is not specified the fence is put into the array of the |
| * shared fences as well. Returns either zero or -ENOMEM. |
| */ |
| -int dma_resv_get_fences_rcu(struct dma_resv *obj, |
| - struct dma_fence **pfence_excl, |
| - unsigned int *pshared_count, |
| - struct dma_fence ***pshared) |
| +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, |
| + unsigned int *pshared_count, |
| + struct dma_fence ***pshared) |
| { |
| struct dma_fence **shared = NULL; |
| struct dma_fence *fence_excl; |
| @@ -473,23 +472,24 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj, |
| *pshared = shared; |
| return ret; |
| } |
| -EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); |
| +EXPORT_SYMBOL_GPL(dma_resv_get_fences); |
| |
| /** |
| - * dma_resv_wait_timeout_rcu - Wait on reservation's objects |
| + * dma_resv_wait_timeout - Wait on reservation's objects |
| * shared and/or exclusive fences. |
| * @obj: the reservation object |
| * @wait_all: if true, wait on all fences, else wait on just exclusive fence |
| * @intr: if true, do interruptible wait |
| * @timeout: timeout value in jiffies or zero to return immediately |
| * |
| + * Callers are not required to hold specific locks, but maybe hold |
| + * dma_resv_lock() already |
| * RETURNS |
| * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or |
| * greater than zer on success. |
| */ |
| -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, |
| - bool wait_all, bool intr, |
| - unsigned long timeout) |
| +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, |
| + unsigned long timeout) |
| { |
| long ret = timeout ? timeout : 1; |
| unsigned int seq, shared_count; |
| @@ -561,7 +561,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj, |
| rcu_read_unlock(); |
| goto retry; |
| } |
| -EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); |
| +EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); |
| |
| |
| static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) |
| @@ -581,16 +581,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) |
| } |
| |
| /** |
| - * dma_resv_test_signaled_rcu - Test if a reservation object's |
| - * fences have been signaled. |
| + * dma_resv_test_signaled - Test if a reservation object's fences have been |
| + * signaled. |
| * @obj: the reservation object |
| * @test_all: if true, test all fences, otherwise only test the exclusive |
| * fence |
| * |
| + * Callers are not required to hold specific locks, but maybe hold |
| + * dma_resv_lock() already |
| * RETURNS |
| * true if all fences signaled, else false |
| */ |
| -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) |
| +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) |
| { |
| unsigned int seq, shared_count; |
| int ret; |
| @@ -639,7 +641,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) |
| rcu_read_unlock(); |
| return ret; |
| } |
| -EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu); |
| +EXPORT_SYMBOL_GPL(dma_resv_test_signaled); |
| |
| #if IS_ENABLED(CONFIG_LOCKDEP) |
| static int __init dma_resv_lockdep(void) |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
| index 2a4cd7d377bf..2140c2a93ab4 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
| @@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, |
| goto unpin; |
| } |
| |
| - r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, |
| - &work->shared_count, |
| - &work->shared); |
| + r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, |
| + &work->shared_count, &work->shared); |
| if (unlikely(r != 0)) { |
| DRM_ERROR("failed to get fences for buffer\n"); |
| goto unpin; |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |
| index 37ec59365080..f73c96bf6d5e 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c |
| @@ -98,7 +98,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj) |
| if (!dma_resv_get_list(obj)) /* no shared fences to convert */ |
| return 0; |
| |
| - r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); |
| + r = dma_resv_get_fences(obj, NULL, &count, &fences); |
| if (r) |
| return r; |
| |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
| index b7c424291702..cac6a54169c0 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |
| @@ -471,8 +471,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
| return -ENOENT; |
| } |
| robj = gem_to_amdgpu_bo(gobj); |
| - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, |
| - timeout); |
| + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); |
| |
| /* ret == 0 means not signaled, |
| * ret > 0 means signaled |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
| index c7f3aae23c62..b7fb72bff2c1 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |
| @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, |
| unsigned count; |
| int r; |
| |
| - r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); |
| + r = dma_resv_get_fences(resv, NULL, &count, &fences); |
| if (r) |
| goto fallback; |
| |
| @@ -156,8 +156,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, |
| /* Not enough memory for the delayed delete, as last resort |
| * block for all the fences to complete. |
| */ |
| - dma_resv_wait_timeout_rcu(resv, true, false, |
| - MAX_SCHEDULE_TIMEOUT); |
| + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); |
| amdgpu_pasid_free(pasid); |
| } |
| |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c |
| index 2741c28ff1b5..d6c54c7f7679 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c |
| @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, |
| |
| mmu_interval_set_seq(mni, cur_seq); |
| |
| - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, |
| - MAX_SCHEDULE_TIMEOUT); |
| + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, |
| + MAX_SCHEDULE_TIMEOUT); |
| mutex_unlock(&adev->notifier_lock); |
| if (r <= 0) |
| DRM_ERROR("(%ld) failed to wait for user bo\n", r); |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
| index 9092ac12a270..a7fec74c4e81 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
| @@ -801,8 +801,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) |
| return 0; |
| } |
| |
| - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, |
| - MAX_SCHEDULE_TIMEOUT); |
| + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, |
| + MAX_SCHEDULE_TIMEOUT); |
| if (r < 0) |
| return r; |
| |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c |
| index 35f68748ad26..76d4ea341376 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c |
| @@ -1114,9 +1114,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, |
| ib->length_dw = 16; |
| |
| if (direct) { |
| - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, |
| - true, false, |
| - msecs_to_jiffies(10)); |
| + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, |
| + msecs_to_jiffies(10)); |
| if (r == 0) |
| r = -ETIMEDOUT; |
| if (r < 0) |
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
| index 3b6c0b48d0b1..1fc0f6ee0b59 100644 |
| --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
| +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |
| @@ -2059,13 +2059,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
| unsigned i, shared_count; |
| int r; |
| |
| - r = dma_resv_get_fences_rcu(resv, &excl, |
| - &shared_count, &shared); |
| + r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); |
| if (r) { |
| /* Not enough memory to grab the fence list, as last resort |
| * block for all the fences to complete. |
| */ |
| - dma_resv_wait_timeout_rcu(resv, true, false, |
| + dma_resv_wait_timeout(resv, true, false, |
| MAX_SCHEDULE_TIMEOUT); |
| return; |
| } |
| @@ -2677,7 +2676,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) |
| return true; |
| |
| /* Don't evict VM page tables while they are busy */ |
| - if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) |
| + if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) |
| return false; |
| |
| /* Try to block ongoing updates */ |
| @@ -2857,8 +2856,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
| */ |
| long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) |
| { |
| - timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, |
| - true, true, timeout); |
| + timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true, |
| + true, timeout); |
| if (timeout <= 0) |
| return timeout; |
| |
| diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
| index f2a150223e70..d70cb95e4e17 100644 |
| --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
| +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |
| @@ -8425,9 +8425,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, |
| * deadlock during GPU reset when this fence will not signal |
| * but we hold reservation lock for the BO. |
| */ |
| - r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, |
| - false, |
| - msecs_to_jiffies(5000)); |
| + r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, |
| + msecs_to_jiffies(5000)); |
| if (unlikely(r <= 0)) |
| DRM_ERROR("Waiting for fences timed out!"); |
| |
| diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c |
| index 9989425e9875..8ce0725f620c 100644 |
| --- a/drivers/gpu/drm/drm_gem.c |
| +++ b/drivers/gpu/drm/drm_gem.c |
| @@ -770,8 +770,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, |
| return -EINVAL; |
| } |
| |
| - ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, |
| - true, timeout); |
| + ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); |
| if (ret == 0) |
| ret = -ETIME; |
| else if (ret > 0) |
| @@ -1380,7 +1379,7 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array, |
| return drm_gem_fence_array_add(fence_array, fence); |
| } |
| |
| - ret = dma_resv_get_fences_rcu(obj->resv, NULL, |
| + ret = dma_resv_get_fences(obj->resv, NULL, |
| &fence_count, &fences); |
| if (ret || !fence_count) |
| return ret; |
| diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c |
| index db69f19ab5bc..dd36d295c03d 100644 |
| --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c |
| +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c |
| @@ -390,14 +390,12 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, |
| } |
| |
| if (op & ETNA_PREP_NOSYNC) { |
| - if (!dma_resv_test_signaled_rcu(obj->resv, |
| - write)) |
| + if (!dma_resv_test_signaled(obj->resv, write)) |
| return -EBUSY; |
| } else { |
| unsigned long remain = etnaviv_timeout_to_jiffies(timeout); |
| |
| - ret = dma_resv_wait_timeout_rcu(obj->resv, |
| - write, true, remain); |
| + ret = dma_resv_wait_timeout(obj->resv, write, true, remain); |
| if (ret <= 0) |
| return ret == 0 ? -ETIMEDOUT : ret; |
| } |
| diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |
| index d05c35994579..90ae6e242f6a 100644 |
| --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |
| +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |
| @@ -189,9 +189,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) |
| continue; |
| |
| if (bo->flags & ETNA_SUBMIT_BO_WRITE) { |
| - ret = dma_resv_get_fences_rcu(robj, &bo->excl, |
| - &bo->nr_shared, |
| - &bo->shared); |
| + ret = dma_resv_get_fences(robj, &bo->excl, |
| + &bo->nr_shared, |
| + &bo->shared); |
| if (ret) |
| return ret; |
| } else { |
| diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c |
| index 9e508e7d4629..7df91b7e4ca8 100644 |
| --- a/drivers/gpu/drm/i915/dma_resv_utils.c |
| +++ b/drivers/gpu/drm/i915/dma_resv_utils.c |
| @@ -10,7 +10,7 @@ |
| void dma_resv_prune(struct dma_resv *resv) |
| { |
| if (dma_resv_trylock(resv)) { |
| - if (dma_resv_test_signaled_rcu(resv, true)) |
| + if (dma_resv_test_signaled(resv, true)) |
| dma_resv_add_excl_fence(resv, NULL); |
| dma_resv_unlock(resv); |
| } |
| diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c |
| index 25235ef630c1..c36d1b1a44c2 100644 |
| --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c |
| +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c |
| @@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
| * Alternatively, we can trade that extra information on read/write |
| * activity with |
| * args->busy = |
| - * !dma_resv_test_signaled_rcu(obj->resv, true); |
| + * !dma_resv_test_signaled(obj->resv, true); |
| * to report the overall busyness. This is what the wait-ioctl does. |
| * |
| */ |
| diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |
| index 297143511f99..66789111a24b 100644 |
| --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |
| +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c |
| @@ -1481,7 +1481,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma) |
| if (DBG_FORCE_RELOC) |
| return false; |
| |
| - return !dma_resv_test_signaled_rcu(vma->resv, true); |
| + return !dma_resv_test_signaled(vma->resv, true); |
| } |
| |
| static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset) |
| diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
| index a657b99ec760..b5cbbe659a77 100644 |
| --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
| +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
| @@ -85,8 +85,8 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni, |
| return true; |
| |
| /* we will unbind on next submission, still have userptr pins */ |
| - r = dma_resv_wait_timeout_rcu(obj->base.resv, true, false, |
| - MAX_SCHEDULE_TIMEOUT); |
| + r = dma_resv_wait_timeout(obj->base.resv, true, false, |
| + MAX_SCHEDULE_TIMEOUT); |
| if (r <= 0) |
| drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r); |
| |
| diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c |
| index 4b9856d5ba14..c91f2b72f615 100644 |
| --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c |
| +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c |
| @@ -45,7 +45,7 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, |
| unsigned int count, i; |
| int ret; |
| |
| - ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); |
| + ret = dma_resv_get_fences(resv, &excl, &count, &shared); |
| if (ret) |
| return ret; |
| |
| @@ -158,8 +158,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, |
| unsigned int count, i; |
| int ret; |
| |
| - ret = dma_resv_get_fences_rcu(obj->base.resv, |
| - &excl, &count, &shared); |
| + ret = dma_resv_get_fences(obj->base.resv, &excl, &count, |
| + &shared); |
| if (ret) |
| return ret; |
| |
| diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c |
| index c8a6ed8617f3..98184a76b2dd 100644 |
| --- a/drivers/gpu/drm/i915/i915_request.c |
| +++ b/drivers/gpu/drm/i915/i915_request.c |
| @@ -1594,8 +1594,8 @@ i915_request_await_object(struct i915_request *to, |
| struct dma_fence **shared; |
| unsigned int count, i; |
| |
| - ret = dma_resv_get_fences_rcu(obj->base.resv, |
| - &excl, &count, &shared); |
| + ret = dma_resv_get_fences(obj->base.resv, &excl, &count, |
| + &shared); |
| if (ret) |
| return ret; |
| |
| diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c |
| index 2744558f3050..2d14aba93570 100644 |
| --- a/drivers/gpu/drm/i915/i915_sw_fence.c |
| +++ b/drivers/gpu/drm/i915/i915_sw_fence.c |
| @@ -582,7 +582,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, |
| struct dma_fence **shared; |
| unsigned int count, i; |
| |
| - ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); |
| + ret = dma_resv_get_fences(resv, &excl, &count, &shared); |
| if (ret) |
| return ret; |
| |
| diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c |
| index 369d91e6361e..0095efd9d872 100644 |
| --- a/drivers/gpu/drm/msm/msm_gem.c |
| +++ b/drivers/gpu/drm/msm/msm_gem.c |
| @@ -915,8 +915,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) |
| op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); |
| long ret; |
| |
| - ret = dma_resv_wait_timeout_rcu(obj->resv, write, |
| - true, remain); |
| + ret = dma_resv_wait_timeout(obj->resv, write, true, remain); |
| if (ret == 0) |
| return remain == 0 ? -EBUSY : -ETIMEDOUT; |
| else if (ret < 0) |
| diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c |
| index c88cbb85f101..5665456103f7 100644 |
| --- a/drivers/gpu/drm/nouveau/nouveau_gem.c |
| +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c |
| @@ -928,8 +928,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, |
| return -ENOENT; |
| nvbo = nouveau_gem_object(gem); |
| |
| - lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, |
| - no_wait ? 0 : 30 * HZ); |
| + lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true, |
| + no_wait ? 0 : 30 * HZ); |
| if (!lret) |
| ret = -EBUSY; |
| else if (lret > 0) |
| diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c |
| index 83a461bdeea8..28ed44808009 100644 |
| --- a/drivers/gpu/drm/panfrost/panfrost_drv.c |
| +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c |
| @@ -311,8 +311,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, |
| if (!gem_obj) |
| return -ENOENT; |
| |
| - ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, |
| - true, timeout); |
| + ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout); |
| if (!ret) |
| ret = timeout ? -ETIMEDOUT : -EBUSY; |
| |
| diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c |
| index 05ea2f39f626..72f3890bd047 100644 |
| --- a/drivers/gpu/drm/radeon/radeon_gem.c |
| +++ b/drivers/gpu/drm/radeon/radeon_gem.c |
| @@ -125,7 +125,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, |
| } |
| if (domain == RADEON_GEM_DOMAIN_CPU) { |
| /* Asking for cpu access wait for object idle */ |
| - r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); |
| + r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); |
| if (!r) |
| r = -EBUSY; |
| |
| @@ -474,7 +474,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
| } |
| robj = gem_to_radeon_bo(gobj); |
| |
| - r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); |
| + r = dma_resv_test_signaled(robj->tbo.base.resv, true); |
| if (r == 0) |
| r = -EBUSY; |
| else |
| @@ -503,7 +503,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
| } |
| robj = gem_to_radeon_bo(gobj); |
| |
| - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); |
| + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); |
| if (ret == 0) |
| r = -EBUSY; |
| else if (ret < 0) |
| diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c |
| index e37c9a57a7c3..9fa88549c89e 100644 |
| --- a/drivers/gpu/drm/radeon/radeon_mn.c |
| +++ b/drivers/gpu/drm/radeon/radeon_mn.c |
| @@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, |
| return true; |
| } |
| |
| - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, |
| - MAX_SCHEDULE_TIMEOUT); |
| + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, |
| + MAX_SCHEDULE_TIMEOUT); |
| if (r <= 0) |
| DRM_ERROR("(%ld) failed to wait for user bo\n", r); |
| |
| diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c |
| index 83f0f5ccb497..7e8446b192ad 100644 |
| --- a/drivers/gpu/drm/ttm/ttm_bo.c |
| +++ b/drivers/gpu/drm/ttm/ttm_bo.c |
| @@ -294,7 +294,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, |
| struct dma_resv *resv = &bo->base._resv; |
| int ret; |
| |
| - if (dma_resv_test_signaled_rcu(resv, true)) |
| + if (dma_resv_test_signaled(resv, true)) |
| ret = 0; |
| else |
| ret = -EBUSY; |
| @@ -306,8 +306,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, |
| dma_resv_unlock(bo->base.resv); |
| spin_unlock(&bo->bdev->lru_lock); |
| |
| - lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, |
| - 30 * HZ); |
| + lret = dma_resv_wait_timeout(resv, true, interruptible, |
| + 30 * HZ); |
| |
| if (lret < 0) |
| return lret; |
| @@ -407,8 +407,8 @@ static void ttm_bo_release(struct kref *kref) |
| /* Last resort, if we fail to allocate memory for the |
| * fences block for the BO to become idle |
| */ |
| - dma_resv_wait_timeout_rcu(bo->base.resv, true, false, |
| - 30 * HZ); |
| + dma_resv_wait_timeout(bo->base.resv, true, false, |
| + 30 * HZ); |
| } |
| |
| if (bo->bdev->funcs->release_notify) |
| @@ -418,7 +418,7 @@ static void ttm_bo_release(struct kref *kref) |
| ttm_mem_io_free(bdev, &bo->mem); |
| } |
| |
| - if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || |
| + if (!dma_resv_test_signaled(bo->base.resv, true) || |
| !dma_resv_trylock(bo->base.resv)) { |
| /* The BO is not idle, resurrect it for delayed destroy */ |
| ttm_bo_flush_all_fences(bo); |
| @@ -1144,14 +1144,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, |
| long timeout = CONFIG_DRM_TTM_BO_WAIT_TIMEOUT * HZ; |
| |
| if (no_wait) { |
| - if (dma_resv_test_signaled_rcu(bo->base.resv, true)) |
| + if (dma_resv_test_signaled(bo->base.resv, true)) |
| return 0; |
| else |
| return -EBUSY; |
| } |
| |
| - timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, |
| - interruptible, timeout); |
| + timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible, |
| + timeout); |
| if (timeout < 0) |
| return timeout; |
| |
| diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c |
| index 2902dc6e64fa..bd6f75285fd9 100644 |
| --- a/drivers/gpu/drm/vgem/vgem_fence.c |
| +++ b/drivers/gpu/drm/vgem/vgem_fence.c |
| @@ -151,8 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, |
| |
| /* Check for a conflicting fence */ |
| resv = obj->resv; |
| - if (!dma_resv_test_signaled_rcu(resv, |
| - arg->flags & VGEM_FENCE_WRITE)) { |
| + if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) { |
| ret = -EBUSY; |
| goto err_fence; |
| } |
| diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
| index 44d52381854c..7eb8b182b0e2 100644 |
| --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
| +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
| @@ -500,10 +500,9 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, |
| return -ENOENT; |
| |
| if (args->flags & VIRTGPU_WAIT_NOWAIT) { |
| - ret = dma_resv_test_signaled_rcu(obj->resv, true); |
| + ret = dma_resv_test_signaled(obj->resv, true); |
| } else { |
| - ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, |
| - timeout); |
| + ret = dma_resv_wait_timeout(obj->resv, true, true, timeout); |
| } |
| if (ret == 0) |
| ret = -EBUSY; |
| diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |
| index 50e529a01677..85ec59fae8d8 100644 |
| --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |
| +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c |
| @@ -754,9 +754,9 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, |
| if (flags & drm_vmw_synccpu_allow_cs) { |
| long lret; |
| |
| - lret = dma_resv_wait_timeout_rcu |
| - (bo->base.resv, true, true, |
| - nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); |
| + lret = dma_resv_wait_timeout(bo->base.resv, true, true, |
| + nonblock ? 0 : |
| + MAX_SCHEDULE_TIMEOUT); |
| if (!lret) |
| return -EBUSY; |
| else if (lret < 0) |
| diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h |
| index d44a77e8a7e3..ab31d332503a 100644 |
| --- a/include/linux/dma-resv.h |
| +++ b/include/linux/dma-resv.h |
| @@ -275,19 +275,12 @@ void dma_resv_init(struct dma_resv *obj); |
| void dma_resv_fini(struct dma_resv *obj); |
| int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); |
| void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); |
| - |
| void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); |
| - |
| -int dma_resv_get_fences_rcu(struct dma_resv *obj, |
| - struct dma_fence **pfence_excl, |
| - unsigned *pshared_count, |
| - struct dma_fence ***pshared); |
| - |
| +int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, |
| + unsigned *pshared_count, struct dma_fence ***pshared); |
| int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); |
| - |
| -long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, |
| - unsigned long timeout); |
| - |
| -bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); |
| +long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, |
| + unsigned long timeout); |
| +bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); |
| |
| #endif /* _LINUX_RESERVATION_H */ |
| -- |
| 2.17.1 |
| |