Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r-- | drivers/dma-buf/dma-buf.c | 206
1 file changed, 84 insertions, 122 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 63d32261b63f..6437b2e978fb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -67,14 +67,11 @@ static void dma_buf_release(struct dentry *dentry)
 	BUG_ON(dmabuf->vmapping_counter);
 
 	/*
-	 * Any fences that a dma-buf poll can wait on should be signaled
-	 * before releasing dma-buf. This is the responsibility of each
-	 * driver that uses the reservation objects.
-	 *
-	 * If you hit this BUG() it means someone dropped their ref to the
-	 * dma-buf while still having pending operation to the buffer.
+	 * If you hit this BUG() it could mean:
+	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dma_buf_stats_teardown(dmabuf);
 	dmabuf->ops->release(dmabuf);
@@ -82,6 +79,7 @@ static void dma_buf_release(struct dentry *dentry)
 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
 		dma_resv_fini(dmabuf->resv);
 
+	WARN_ON(!list_empty(&dmabuf->attachments));
 	module_put(dmabuf->owner);
 	kfree(dmabuf->name);
 	kfree(dmabuf);
@@ -199,22 +197,41 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dcb->poll->lock, flags);
 	wake_up_locked_poll(dcb->poll, dcb->active);
 	dcb->active = 0;
 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+	dma_fence_put(fence);
+	/* Paired with get_file in dma_buf_poll */
+	fput(dmabuf->file);
+}
+
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
+				struct dma_buf_poll_cb_t *dcb)
+{
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	int r;
+
+	dma_resv_for_each_fence(&cursor, resv, write, fence) {
+		dma_fence_get(fence);
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		if (!r)
+			return true;
+		dma_fence_put(fence);
+	}
+
+	return false;
 }
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -228,101 +245,55 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
-retry:
-	seq = read_seqcount_begin(&resv->seq);
-	rcu_read_lock();
+	dma_resv_lock(resv, NULL);
 
-	fobj = rcu_dereference(resv->fence);
-	if (fobj)
-		shared_count = fobj->shared_count;
-	else
-		shared_count = 0;
-	fence_excl = dma_resv_excl_fence(resv);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
-
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
+	if (events & EPOLLOUT) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
+		if (dcb->active)
+			events &= ~EPOLLOUT;
+		else
+			dcb->active = EPOLLOUT;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
+		if (events & EPOLLOUT) {
+			/* Paired with fput in dma_buf_poll_cb */
+			get_file(dmabuf->file);
+
+			if (!dma_buf_poll_add_cb(resv, true, dcb))
+				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
+			else
+				events &= ~EPOLLOUT;
 		}
 	}
 
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
+	if (events & EPOLLIN) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 
-		/* Only queue a new callback if no event has fired yet */
+		/* Check that callback isn't busy */
 		spin_lock_irq(&dmabuf->poll.lock);
 		if (dcb->active)
-			events &= ~EPOLLOUT;
+			events &= ~EPOLLIN;
 		else
-			dcb->active = EPOLLOUT;
+			dcb->active = EPOLLIN;
 		spin_unlock_irq(&dmabuf->poll.lock);
 
-		if (!(events & EPOLLOUT))
-			goto out;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+		if (events & EPOLLIN) {
+			/* Paired with fput in dma_buf_poll_cb */
+			get_file(dmabuf->file);
 
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
+			if (!dma_buf_poll_add_cb(resv, false, dcb))
+				/* No callback queued, wake up any other waiters */
 				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
-			dma_fence_put(fence);
+			else
+				events &= ~EPOLLIN;
 		}
-
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
-			dma_buf_poll_cb(NULL, &dcb->cb);
 	}
 
-out:
-	rcu_read_unlock();
+	dma_resv_unlock(resv);
 	return events;
 }
 
@@ -565,8 +536,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
@@ -610,7 +581,7 @@ err_module:
 	module_put(exp_info->owner);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(dma_buf_export);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
 
 /**
  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -634,7 +605,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 
 	return fd;
 }
-EXPORT_SYMBOL_GPL(dma_buf_fd);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
 
 /**
  * dma_buf_get - returns the struct dma_buf related to an fd
@@ -660,7 +631,7 @@ struct dma_buf *dma_buf_get(int fd)
 
 	return file->private_data;
 }
-EXPORT_SYMBOL_GPL(dma_buf_get);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
 
 /**
  * dma_buf_put - decreases refcount of the buffer
@@ -679,7 +650,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
 
 	fput(dmabuf->file);
 }
-EXPORT_SYMBOL_GPL(dma_buf_put);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
 
 static void mangle_sg_table(struct sg_table *sg_table)
 {
@@ -810,7 +781,7 @@ err_unlock:
 	dma_buf_detach(dmabuf, attach);
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
 
 /**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -825,7 +796,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 {
 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 }
-EXPORT_SYMBOL_GPL(dma_buf_attach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
 
 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
 			    struct sg_table *sg_table,
@@ -871,7 +842,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 
 	kfree(attach);
 }
-EXPORT_SYMBOL_GPL(dma_buf_detach);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
 
 /**
 * dma_buf_pin - Lock down the DMA-buf
@@ -901,7 +872,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_buf_pin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
 
 /**
 * dma_buf_unpin - Unpin a DMA-buf
@@ -922,7 +893,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
 	if (dmabuf->ops->unpin)
 		dmabuf->ops->unpin(attach);
 }
-EXPORT_SYMBOL_GPL(dma_buf_unpin);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
 
 /**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -1012,7 +983,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 #endif /* CONFIG_DMA_API_DEBUG */
 	return sg_table;
 }
-EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
 
 /**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1048,7 +1019,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
 		dma_buf_unpin(attach);
 }
-EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
 
 /**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1068,7 +1039,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
 		if (attach->importer_ops)
 			attach->importer_ops->move_notify(attach);
 }
-EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
 
 /**
 * DOC: cpu access
@@ -1212,7 +1183,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
 
 /**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1240,7 +1211,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 
 
 /**
@@ -1282,7 +1253,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 
 	return dmabuf->ops->mmap(dmabuf, vma);
 }
-EXPORT_SYMBOL_GPL(dma_buf_mmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 
 /**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1336,7 +1307,7 @@ out_unlock:
 	mutex_unlock(&dmabuf->lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dma_buf_vmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
 
 /**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1360,17 +1331,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 	}
 	mutex_unlock(&dmabuf->lock);
 }
-EXPORT_SYMBOL_GPL(dma_buf_vunmap);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct dma_resv *robj;
-	struct dma_resv_list *fobj;
+	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
-	int count = 0, attach_count, shared_count, i;
+	int count = 0, attach_count;
 	size_t size = 0;
 	int ret;
 
@@ -1389,6 +1359,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 		if (ret)
 			goto error_unlock;
 
+
+		spin_lock(&buf_obj->name_lock);
 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
 				buf_obj->size,
 				buf_obj->file->f_flags, buf_obj->file->f_mode,
@@ -1396,22 +1368,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 				buf_obj->exp_name,
 				file_inode(buf_obj->file)->i_ino,
 				buf_obj->name ?: "");
+		spin_unlock(&buf_obj->name_lock);
 
-		robj = buf_obj->resv;
-		fence = dma_resv_excl_fence(robj);
-		if (fence)
-			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
-				   fence->ops->get_driver_name(fence),
-				   fence->ops->get_timeline_name(fence),
-				   dma_fence_is_signaled(fence) ? "" : "un");
-
-		fobj = rcu_dereference_protected(robj->fence,
-						 dma_resv_held(robj));
-		shared_count = fobj ? fobj->shared_count : 0;
-		for (i = 0; i < shared_count; i++) {
-			fence = rcu_dereference_protected(fobj->shared[i],
-							  dma_resv_held(robj));
-			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
+		dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) {
+			seq_printf(s, "\t%s fence: %s %s %ssignalled\n",
+				   dma_resv_iter_is_exclusive(&cursor) ?
+				   "Exclusive" : "Shared",
 				   fence->ops->get_driver_name(fence),
 				   fence->ops->get_timeline_name(fence),
 				   dma_fence_is_signaled(fence) ? "" : "un");
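As the reworked dma_buf_poll() above shows, EPOLLOUT readiness now waits on all fences in the reservation object (dma_buf_poll_add_cb() with write = true), while EPOLLIN only waits on the write/exclusive side (write = false). A minimal userspace sketch of how a dma-buf file descriptor is typically polled; the descriptor dmabuf_fd and the helper name are hypothetical:

/*
 * Hypothetical sketch: block until a dma-buf fd signals readiness via
 * poll(2). dmabuf_fd is assumed to be obtained elsewhere (e.g. from an
 * exporting driver's ioctl).
 */
#include <poll.h>
#include <stdio.h>

static int wait_for_dmabuf(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLIN | POLLOUT,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret > 0)
		printf("ready:%s%s\n",
		       (pfd.revents & POLLIN) ? " read" : "",
		       (pfd.revents & POLLOUT) ? " write" : "");
	return ret;	/* > 0 ready, 0 timed out, < 0 error */
}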
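Since the exports above move from EXPORT_SYMBOL_GPL to EXPORT_SYMBOL_NS_GPL(..., DMA_BUF), any module calling these helpers must import the DMA_BUF symbol namespace, otherwise modpost warns (or fails, depending on configuration) about the missing namespace import. A minimal importer sketch, assuming a hypothetical module that maps an already established attachment:

/*
 * Hypothetical importer sketch: declare the DMA_BUF namespace before
 * using symbols now exported with EXPORT_SYMBOL_NS_GPL(..., DMA_BUF).
 */
#include <linux/module.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");

/* 'attach' is assumed to come from a prior dma_buf_attach() call. */
static struct sg_table *example_map_attachment(struct dma_buf_attachment *attach)
{
	return dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
}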