-rw-r--r--  include/linux/io_uring_types.h   15
-rw-r--r--  io_uring/cancel.c                  4
-rw-r--r--  io_uring/fdinfo.c                 10
-rw-r--r--  io_uring/filetable.c              44
-rw-r--r--  io_uring/filetable.h               2
-rw-r--r--  io_uring/io_uring.c                7
-rw-r--r--  io_uring/msg_ring.c                4
-rw-r--r--  io_uring/net.c                     6
-rw-r--r--  io_uring/nop.c                     6
-rw-r--r--  io_uring/register.c                3
-rw-r--r--  io_uring/rsrc.c                  209
-rw-r--r--  io_uring/rsrc.h                    7
-rw-r--r--  io_uring/rw.c                      6
-rw-r--r--  io_uring/splice.c                  6
-rw-r--r--  io_uring/uring_cmd.c               6

15 files changed, 123 insertions, 212 deletions
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 696f2a05a98b..77fd508d043a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -55,8 +55,13 @@ struct io_wq_work {
int cancel_seq;
};
+struct io_rsrc_data {
+ unsigned int nr;
+ struct io_rsrc_node **nodes;
+};
+
struct io_file_table {
- struct io_rsrc_node **nodes;
+ struct io_rsrc_data data;
unsigned long *bitmap;
unsigned int alloc_hint;
};
@@ -276,9 +281,7 @@ struct io_ring_ctx {
struct io_wq_work_list iopoll_list;
struct io_file_table file_table;
- struct io_rsrc_node **user_bufs;
- unsigned nr_user_files;
- unsigned nr_user_bufs;
+ struct io_rsrc_data buf_table;
struct io_submit_state submit_state;
@@ -366,10 +369,6 @@ struct io_ring_ctx {
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
- /* slow path rsrc auxilary data, used by update/register */
- struct io_rsrc_data *file_data;
- struct io_rsrc_data *buf_data;
-
u32 pers_next;
struct xarray personalities;
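This first hunk collapses the per-type bookkeeping (nr_user_files, nr_user_bufs, the user_bufs node array, and the heap-allocated file_data/buf_data) into a single struct io_rsrc_data embedded by value in the context. A minimal userspace model of the consolidated shape, with simplified stand-in types rather than the kernel definitions:

#include <stdlib.h>

struct rsrc_node { int refs; };            /* stand-in for io_rsrc_node */

struct rsrc_data {                         /* mirrors struct io_rsrc_data */
        unsigned int nr;                   /* table size; replaces nr_user_files/bufs */
        struct rsrc_node **nodes;          /* entries; replaces ctx->user_bufs etc. */
};

struct ring_ctx {                          /* reduced io_ring_ctx */
        struct rsrc_data buf_table;        /* embedded by value; no separate buf_data */
};

/* One allocator now serves both the file and the buffer table. */
static int rsrc_data_alloc(struct rsrc_data *data, unsigned int nr)
{
        data->nodes = calloc(nr, sizeof(*data->nodes));
        if (!data->nodes)
                return -1;
        data->nr = nr;
        return 0;
}

Because the table is embedded, data.nr doubles as the "anything registered?" test, which is why later hunks replace checks such as !ctx->buf_data with !ctx->buf_table.nr.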
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index cc3475b22ae5..3a2996307025 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -240,9 +240,9 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
/* fixed must be grabbed every time since we drop the uring_lock */
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
- if (unlikely(fd >= ctx->nr_user_files))
+ if (unlikely(fd >= ctx->file_table.data.nr))
return -EBADF;
- fd = array_index_nospec(fd, ctx->nr_user_files);
+ fd = array_index_nospec(fd, ctx->file_table.data.nr);
cd->file = io_file_from_index(&ctx->file_table, fd);
if (!cd->file)
return -EBADF;
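The cancel.c change is mechanical, but the idiom it preserves is worth spelling out: the bound now comes from the embedded table, and the index is still clamped with array_index_nospec() after the range check, so a mispredicted branch cannot speculatively index past the node array. A kernel-style sketch of the lookup, using the helper names from the patch and assuming the usual io_uring internals:

#include <linux/nospec.h>

static struct file *fixed_file_lookup(struct io_ring_ctx *ctx, unsigned int fd)
{
        /* Architectural bounds check first ... */
        if (unlikely(fd >= ctx->file_table.data.nr))
                return NULL;
        /* ... then clamp the value itself, so even a speculatively
         * taken "in bounds" path cannot read past data.nodes[]. */
        fd = array_index_nospec(fd, ctx->file_table.data.nr);
        return io_file_from_index(&ctx->file_table, fd);
}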
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 064a79475c5f..e3f5e9fe5562 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -165,8 +165,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
- seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
- for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
+ seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
+ for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
if (f)
@@ -174,9 +174,9 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
else
seq_printf(m, "%5u: <none>\n", i);
}
- seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
- for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *buf = ctx->user_bufs[i]->buf;
+ seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
+ for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
+ struct io_mapped_ubuf *buf = ctx->buf_table.nodes[i]->buf;
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
}
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index 1b12a9a1cc16..c1f9f9550446 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -38,25 +38,19 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
{
- table->nodes = kvmalloc_array(nr_files, sizeof(struct io_src_node *),
- GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (unlikely(!table->nodes))
+ if (io_rsrc_data_alloc(&table->data, nr_files))
return false;
-
table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
- if (unlikely(!table->bitmap)) {
- kvfree(table->nodes);
- return false;
- }
-
- return true;
+ if (table->bitmap)
+ return true;
+ io_rsrc_data_free(&table->data);
+ return false;
}
void io_free_file_tables(struct io_file_table *table)
{
- kvfree(table->nodes);
+ io_rsrc_data_free(&table->data);
bitmap_free(table->bitmap);
- table->nodes = NULL;
table->bitmap = NULL;
}
@@ -68,22 +62,22 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
if (io_is_uring_fops(file))
return -EBADF;
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- if (slot_index >= ctx->nr_user_files)
+ if (slot_index >= ctx->file_table.data.nr)
return -EINVAL;
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
if (!node)
return -ENOMEM;
- slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
- if (ctx->file_table.nodes[slot_index])
- io_put_rsrc_node(ctx->file_table.nodes[slot_index]);
+ slot_index = array_index_nospec(slot_index, ctx->file_table.data.nr);
+ if (ctx->file_table.data.nodes[slot_index])
+ io_put_rsrc_node(ctx->file_table.data.nodes[slot_index]);
else
io_file_bitmap_set(&ctx->file_table, slot_index);
- ctx->file_table.nodes[slot_index] = node;
+ ctx->file_table.data.nodes[slot_index] = node;
io_fixed_file_set(node, file);
return 0;
}
@@ -129,16 +123,16 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
{
- if (unlikely(!ctx->file_data))
+ if (unlikely(!ctx->file_table.data.nr))
return -ENXIO;
- if (offset >= ctx->nr_user_files)
+ if (offset >= ctx->file_table.data.nr)
return -EINVAL;
- offset = array_index_nospec(offset, ctx->nr_user_files);
- if (!ctx->file_table.nodes[offset])
+ offset = array_index_nospec(offset, ctx->file_table.data.nr);
+ if (!ctx->file_table.data.nodes[offset])
return -EBADF;
- io_put_rsrc_node(ctx->file_table.nodes[offset]);
- ctx->file_table.nodes[offset] = NULL;
+ io_put_rsrc_node(ctx->file_table.data.nodes[offset]);
+ ctx->file_table.data.nodes[offset] = NULL;
io_file_bitmap_clear(&ctx->file_table, offset);
return 0;
}
@@ -153,7 +147,7 @@ int io_register_file_alloc_range(struct io_ring_ctx *ctx,
return -EFAULT;
if (check_add_overflow(range.off, range.len, &end))
return -EOVERFLOW;
- if (range.resv || end > ctx->nr_user_files)
+ if (range.resv || end > ctx->file_table.data.nr)
return -EINVAL;
io_file_table_set_alloc_range(ctx, range.off, range.len);
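In filetable.c, io_alloc_file_tables() now delegates the node array to io_rsrc_data_alloc() and unwinds it when the follow-up bitmap allocation fails; io_free_file_tables() mirrors that. A self-contained model of the allocate-or-unwind pairing, with calloc/free standing in for the kernel's accounted allocators:

#include <stdbool.h>
#include <stdlib.h>

struct file_table {
        struct { unsigned int nr; void **nodes; } data;
        unsigned long *bitmap;
};

static bool table_alloc(struct file_table *t, unsigned int nr)
{
        t->data.nodes = calloc(nr, sizeof(void *));
        if (!t->data.nodes)
                return false;
        t->data.nr = nr;

        t->bitmap = calloc((nr + 63) / 64, sizeof(unsigned long));
        if (t->bitmap)
                return true;

        /* Second allocation failed: undo the first before reporting. */
        free(t->data.nodes);
        t->data.nodes = NULL;
        t->data.nr = 0;
        return false;
}

static void table_free(struct file_table *t)
{
        free(t->data.nodes);
        t->data.nodes = NULL;
        t->data.nr = 0;
        free(t->bitmap);
        t->bitmap = NULL;
}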
diff --git a/io_uring/filetable.h b/io_uring/filetable.h
index 47616079abaa..664c31502dbb 100644
--- a/io_uring/filetable.h
+++ b/io_uring/filetable.h
@@ -52,7 +52,7 @@ static inline struct file *io_slot_file(struct io_rsrc_node *node)
static inline struct file *io_file_from_index(struct io_file_table *table,
int index)
{
- struct io_rsrc_node *node = table->nodes[index];
+ struct io_rsrc_node *node = table->data.nodes[index];
if (node)
return io_slot_file(node);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 60c947114fa3..78df515fb3a7 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1879,11 +1879,10 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
struct file *file = NULL;
io_ring_submit_lock(ctx, issue_flags);
-
- if (unlikely((unsigned int)fd >= ctx->nr_user_files))
+ if (unlikely((unsigned int)fd >= ctx->file_table.data.nr))
goto out;
- fd = array_index_nospec(fd, ctx->nr_user_files);
- node = ctx->file_table.nodes[fd];
+ fd = array_index_nospec(fd, ctx->file_table.data.nr);
+ node = ctx->file_table.data.nodes[fd];
if (node) {
io_req_assign_rsrc_node(req, node);
req->flags |= io_slot_flags(node);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index edea1ffd501c..b90ab3b8f5e0 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -180,8 +180,8 @@ static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_fl
int idx = msg->src_fd;
io_ring_submit_lock(ctx, issue_flags);
- if (likely(idx < ctx->nr_user_files)) {
- idx = array_index_nospec(idx, ctx->nr_user_files);
+ if (likely(idx < ctx->file_table.data.nr)) {
+ idx = array_index_nospec(idx, ctx->file_table.data.nr);
file = io_file_from_index(&ctx->file_table, idx);
if (file)
get_file(file);
diff --git a/io_uring/net.c b/io_uring/net.c
index ce1156551d10..3e1f31574abb 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1347,9 +1347,9 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- if (sr->buf_index < ctx->nr_user_bufs) {
- idx = array_index_nospec(sr->buf_index, ctx->nr_user_bufs);
- node = ctx->user_bufs[idx];
+ if (sr->buf_index < ctx->buf_table.nr) {
+ idx = array_index_nospec(sr->buf_index, ctx->buf_table.nr);
+ node = ctx->buf_table.nodes[idx];
io_req_assign_rsrc_node(sr->notif, node);
ret = 0;
}
diff --git a/io_uring/nop.c b/io_uring/nop.c
index de91600a3bc6..0dac01127de5 100644
--- a/io_uring/nop.c
+++ b/io_uring/nop.c
@@ -66,9 +66,9 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- if (nop->buffer < ctx->nr_user_bufs) {
- idx = array_index_nospec(nop->buffer, ctx->nr_user_bufs);
- node = READ_ONCE(ctx->user_bufs[idx]);
+ if (nop->buffer < ctx->buf_table.nr) {
+ idx = array_index_nospec(nop->buffer, ctx->buf_table.nr);
+ node = READ_ONCE(ctx->buf_table.nodes[idx]);
io_req_assign_rsrc_node(req, node);
ret = 0;
}
diff --git a/io_uring/register.c b/io_uring/register.c
index 1eb686eaa310..45edfc57963a 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -937,7 +937,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
- trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
+ trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
+ ctx->buf_table.nr, ret);
if (!use_registered_ring)
fput(file);
return ret;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 69a9cd82460d..49a6ab5f3ae9 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -142,39 +142,28 @@ struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
return node;
}
-static void io_rsrc_data_free(struct io_rsrc_data *data)
+__cold void io_rsrc_data_free(struct io_rsrc_data *data)
{
- int i;
-
- for (i = 0; i < data->nr; i++) {
- struct io_rsrc_node *node = data->nodes[i];
-
- if (node)
- io_put_rsrc_node(node);
+ if (!data->nr)
+ return;
+ while (data->nr--) {
+ if (data->nodes[data->nr])
+ io_put_rsrc_node(data->nodes[data->nr]);
}
kvfree(data->nodes);
- kfree(data);
+ data->nodes = NULL;
+ data->nr = 0;
}
-__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, unsigned nr,
- struct io_rsrc_data **pdata)
+__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
{
- struct io_rsrc_data *data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
- GFP_KERNEL | __GFP_ZERO);
- if (!data->nodes) {
- kfree(data);
- return -ENOMEM;
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (data->nodes) {
+ data->nr = nr;
+ return 0;
}
-
- data->nr = nr;
- *pdata = data;
- return 0;
+ return -ENOMEM;
}
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
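The reworked io_rsrc_data_free() above walks the table back to front, drops each node, and then zeroes the struct in place, so the same embedded io_rsrc_data can be registered again and data.nr == 0 reads as "unregistered". A userspace model of the free path, where node_put() is a hypothetical stand-in for io_put_rsrc_node():

#include <stdlib.h>

struct rsrc_node { int refs; };

struct rsrc_data {
        unsigned int nr;
        struct rsrc_node **nodes;
};

static void node_put(struct rsrc_node *node)       /* models io_put_rsrc_node */
{
        if (--node->refs == 0)
                free(node);
}

static void rsrc_data_free(struct rsrc_data *data)
{
        if (!data->nr)
                return;
        /* Back to front; nr wraps once when the loop exits, but it is
         * reset below, matching the patch. */
        while (data->nr--) {
                if (data->nodes[data->nr])
                        node_put(data->nodes[data->nr]);
        }
        free(data->nodes);
        data->nodes = NULL;                        /* table now reads as unregistered */
        data->nr = 0;
}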
@@ -186,9 +175,9 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
int fd, i, err = 0;
unsigned int done;
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_files)
+ if (up->offset + nr_args > ctx->file_table.data.nr)
return -EINVAL;
for (done = 0; done < nr_args; done++) {
@@ -206,10 +195,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (fd == IORING_REGISTER_FILES_SKIP)
continue;
- i = array_index_nospec(up->offset + done, ctx->nr_user_files);
- if (ctx->file_table.nodes[i]) {
- io_put_rsrc_node(ctx->file_table.nodes[i]);
- ctx->file_table.nodes[i] = NULL;
+ i = array_index_nospec(up->offset + done, ctx->file_table.data.nr);
+ if (ctx->file_table.data.nodes[i]) {
+ io_put_rsrc_node(ctx->file_table.data.nodes[i]);
+ ctx->file_table.data.nodes[i] = NULL;
io_file_bitmap_clear(&ctx->file_table, i);
}
if (fd != -1) {
@@ -234,7 +223,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
fput(file);
break;
}
- ctx->file_table.nodes[i] = node;
+ ctx->file_table.data.nodes[i] = node;
if (tag)
node->tag = tag;
io_fixed_file_set(node, file);
@@ -256,9 +245,9 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
__u32 done;
int i, err;
- if (!ctx->buf_data)
+ if (!ctx->buf_table.nr)
return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_bufs)
+ if (up->offset + nr_args > ctx->buf_table.nr)
return -EINVAL;
for (done = 0; done < nr_args; done++) {
@@ -282,16 +271,16 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
err = -EINVAL;
break;
}
- i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
if (IS_ERR(node)) {
err = PTR_ERR(node);
break;
}
- if (ctx->user_bufs[i])
- io_put_rsrc_node(ctx->user_bufs[i]);
+ i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
+ if (ctx->buf_table.nodes[i])
+ io_put_rsrc_node(ctx->buf_table.nodes[i]);
- ctx->user_bufs[i] = node;
+ ctx->buf_table.nodes[i] = node;
if (tag)
node->tag = tag;
if (ctx->compat)
@@ -409,7 +398,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
struct file *file;
int ret, fd;
- if (!req->ctx->file_data)
+ if (!req->ctx->file_table.data.nr)
return -ENXIO;
for (done = 0; done < up->nr_args; done++) {
@@ -494,35 +483,13 @@ void io_free_rsrc_node(struct io_rsrc_node *node)
kfree(node);
}
-static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
-{
- int i;
-
- lockdep_assert_held(&ctx->uring_lock);
-
- for (i = 0; i < ctx->nr_user_files; i++) {
- struct io_rsrc_node *node = ctx->file_table.nodes[i];
-
- if (node) {
- io_put_rsrc_node(node);
- io_file_bitmap_clear(&ctx->file_table, i);
- ctx->file_table.nodes[i] = NULL;
- }
- }
-
- io_free_file_tables(&ctx->file_table);
- io_file_table_set_alloc_range(ctx, 0, 0);
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
- ctx->nr_user_files = 0;
-}
-
int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- __io_sqe_files_unregister(ctx);
+ io_free_file_tables(&ctx->file_table);
+ io_file_table_set_alloc_range(ctx, 0, 0);
return 0;
}
@@ -534,7 +501,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
int fd, ret;
unsigned i;
- if (ctx->file_data)
+ if (ctx->file_table.data.nr)
return -EBUSY;
if (!nr_args)
return -EINVAL;
@@ -542,17 +509,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return -EMFILE;
if (nr_args > rlimit(RLIMIT_NOFILE))
return -EMFILE;
- ret = io_rsrc_data_alloc(ctx, nr_args, &ctx->file_data);
- if (ret)
- return ret;
-
- if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
+ if (!io_alloc_file_tables(&ctx->file_table, nr_args))
return -ENOMEM;
- }
- for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
+ for (i = 0; i < nr_args; i++) {
struct io_rsrc_node *node;
u64 tag = 0;
@@ -589,44 +549,24 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
}
if (tag)
node->tag = tag;
- ctx->file_table.nodes[i] = node;
+ ctx->file_table.data.nodes[i] = node;
io_fixed_file_set(node, file);
io_file_bitmap_set(&ctx->file_table, i);
}
/* default it to the whole table */
- io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
+ io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
return 0;
fail:
- __io_sqe_files_unregister(ctx);
+ io_sqe_files_unregister(ctx);
return ret;
}
-static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
-{
- unsigned int i;
-
- lockdep_assert_held(&ctx->uring_lock);
-
- for (i = 0; i < ctx->nr_user_bufs; i++) {
- if (ctx->user_bufs[i]) {
- io_put_rsrc_node(ctx->user_bufs[i]);
- ctx->user_bufs[i] = NULL;
- }
- }
- kvfree(ctx->user_bufs);
- ctx->user_bufs = NULL;
- io_rsrc_data_free(ctx->buf_data);
- ctx->buf_data = NULL;
- ctx->nr_user_bufs = 0;
-}
-
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
- if (!ctx->buf_data)
+ if (!ctx->buf_table.nr)
return -ENXIO;
-
- __io_sqe_buffers_unregister(ctx);
+ io_rsrc_data_free(&ctx->buf_table);
return 0;
}
@@ -653,8 +593,8 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
}
/* check previously registered pages */
- for (i = 0; i < ctx->nr_user_bufs; i++) {
- struct io_rsrc_node *node = ctx->user_bufs[i];
+ for (i = 0; i < ctx->buf_table.nr; i++) {
+ struct io_rsrc_node *node = ctx->buf_table.nodes[i];
struct io_mapped_ubuf *imu = node->buf;
for (j = 0; j < imu->nr_bvecs; j++) {
@@ -805,6 +745,9 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
struct io_imu_folio_data data;
bool coalesced;
+ if (!iov->iov_base)
+ return rsrc_empty_node;
+
node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
if (!node)
return ERR_PTR(-ENOMEM);
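The new iov_base check above hands back the shared rsrc_empty_node sentinel for an empty registration instead of allocating a real node, so table slots stay non-NULL while owning nothing. A compact model of that sentinel pattern; all names here are illustrative except the idea of rsrc_empty_node itself:

#include <stddef.h>
#include <stdlib.h>

struct iovec_like { void *base; size_t len; };
struct node { int refs; };

static struct node empty_node = { .refs = 1 };     /* shared, never freed */

/* A NULL iov_base reserves the slot: hand back the sentinel instead of
 * allocating, so the slot is non-NULL but owns no mapping. */
static struct node *register_one(const struct iovec_like *iov)
{
        struct node *n;

        if (!iov->base)
                return &empty_node;                /* models rsrc_empty_node */
        n = malloc(sizeof(*n));
        if (n)
                n->refs = 1;
        return n;                                  /* NULL here means ENOMEM */
}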
@@ -864,40 +807,29 @@ done:
return node;
}
-static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
-{
- ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
- return ctx->user_bufs ? 0 : -ENOMEM;
-}
-
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned int nr_args, u64 __user *tags)
{
struct page *last_hpage = NULL;
- struct io_rsrc_data *data;
+ struct io_rsrc_data data;
struct iovec fast_iov, *iov = &fast_iov;
const struct iovec __user *uvec;
int i, ret;
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
- if (ctx->user_bufs)
+ if (ctx->buf_table.nr)
return -EBUSY;
if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
return -EINVAL;
- ret = io_rsrc_data_alloc(ctx, nr_args, &data);
+ ret = io_rsrc_data_alloc(&data, nr_args);
if (ret)
return ret;
- ret = io_buffers_map_alloc(ctx, nr_args);
- if (ret) {
- io_rsrc_data_free(data);
- return ret;
- }
if (!arg)
memset(iov, 0, sizeof(*iov));
- for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
+ for (i = 0; i < nr_args; i++) {
struct io_rsrc_node *node;
u64 tag = 0;
@@ -935,14 +867,12 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
}
if (tag)
node->tag = tag;
- ctx->user_bufs[i] = node;
+ data.nodes[i] = node;
}
- WARN_ON_ONCE(ctx->buf_data);
-
- ctx->buf_data = data;
+ ctx->buf_table = data;
if (ret)
- __io_sqe_buffers_unregister(ctx);
+ io_sqe_buffers_unregister(ctx);
return ret;
}
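io_sqe_buffers_register() now stages the table in an on-stack io_rsrc_data and commits it to ctx->buf_table by plain struct assignment. Notably, it commits even when a slot failed, so the ordinary unregister path performs the unwind rather than a bespoke error loop. A sketch of that build-then-commit flow, reusing the model types above; node_alloc() is a hypothetical per-slot setup:

#include <stdlib.h>

/* Types and helpers as in the earlier models. */
struct rsrc_node { int refs; };
struct rsrc_data { unsigned int nr; struct rsrc_node **nodes; };
int rsrc_data_alloc(struct rsrc_data *data, unsigned int nr);
void rsrc_data_free(struct rsrc_data *data);

static struct rsrc_node *node_alloc(void)          /* hypothetical slot setup */
{
        struct rsrc_node *n = malloc(sizeof(*n));
        if (n)
                n->refs = 1;
        return n;
}

static int buffers_register(struct rsrc_data *table, unsigned int nr)
{
        struct rsrc_data data;                     /* staged locally first */
        unsigned int i;
        int ret = 0;

        if (table->nr)                             /* already registered: -EBUSY */
                return -1;
        if (rsrc_data_alloc(&data, nr))
                return -1;

        for (i = 0; i < nr; i++) {
                data.nodes[i] = node_alloc();
                if (!data.nodes[i]) {
                        ret = -1;
                        break;
                }
        }

        *table = data;                             /* commit even on error ... */
        if (ret)
                rsrc_data_free(table);             /* ... one teardown path suffices */
        return ret;
}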
@@ -1009,8 +939,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
{
- struct io_rsrc_node **user_bufs;
- struct io_rsrc_data *data;
+ struct io_rsrc_data data;
int i, ret, nbufs;
/*
@@ -1021,43 +950,37 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
mutex_lock(&src_ctx->uring_lock);
ret = -ENXIO;
- nbufs = src_ctx->nr_user_bufs;
+ nbufs = src_ctx->buf_table.nr;
if (!nbufs)
goto out_unlock;
- ret = io_rsrc_data_alloc(ctx, nbufs, &data);
+ ret = io_rsrc_data_alloc(&data, nbufs);
if (ret)
goto out_unlock;
- ret = -ENOMEM;
- user_bufs = kvmalloc_array(nbufs, sizeof(struct io_rsrc_node *),
- GFP_KERNEL | __GFP_ZERO);
- if (!user_bufs)
- goto out_free_data;
-
for (i = 0; i < nbufs; i++) {
- struct io_rsrc_node *src_node = src_ctx->user_bufs[i];
+ struct io_rsrc_node *src_node = src_ctx->buf_table.nodes[i];
struct io_rsrc_node *dst_node;
if (src_node == rsrc_empty_node) {
dst_node = rsrc_empty_node;
} else {
dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
- if (!dst_node)
+ if (!dst_node) {
+ ret = -ENOMEM;
goto out_put_free;
+ }
refcount_inc(&src_node->buf->refs);
dst_node->buf = src_node->buf;
}
- user_bufs[i] = dst_node;
+ data.nodes[i] = dst_node;
}
/* Have a ref on the bufs now, drop src lock and re-grab our own lock */
mutex_unlock(&src_ctx->uring_lock);
mutex_lock(&ctx->uring_lock);
- if (!ctx->user_bufs) {
- ctx->user_bufs = user_bufs;
- ctx->buf_data = data;
- ctx->nr_user_bufs = nbufs;
+ if (!ctx->buf_table.nr) {
+ ctx->buf_table = data;
return 0;
}
@@ -1068,12 +991,10 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
i = nbufs;
out_put_free:
while (i--) {
- io_buffer_unmap(src_ctx, user_bufs[i]);
- kfree(user_bufs[i]);
+ io_buffer_unmap(src_ctx, data.nodes[i]);
+ kfree(data.nodes[i]);
}
- kvfree(user_bufs);
-out_free_data:
- io_rsrc_data_free(data);
+ io_rsrc_data_free(&data);
out_unlock:
mutex_unlock(&src_ctx->uring_lock);
mutex_lock(&ctx->uring_lock);
@@ -1094,7 +1015,7 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
struct file *file;
int ret;
- if (ctx->user_bufs || ctx->nr_user_bufs)
+ if (ctx->buf_table.nr)
return -EBUSY;
if (copy_from_user(&buf, arg, sizeof(buf)))
return -EFAULT;
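io_clone_buffers() builds a private node table while holding only the source ring's lock: every destination slot gets a fresh node that shares the source's underlying buffer through a refcount bump, and a partial failure unwinds exactly what was cloned. A self-contained model of that sharing step:

#include <stdlib.h>

struct buf  { int refs; };                         /* the shared mapping */
struct node { struct buf *buf; };                  /* per-table handle */

static int clone_table(struct node **dst, struct node *const *src,
                       unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                dst[i] = malloc(sizeof(*dst[i]));
                if (!dst[i]) {
                        while (i--) {              /* unwind the partial clone */
                                dst[i]->buf->refs--;
                                free(dst[i]);
                        }
                        return -1;
                }
                src[i]->buf->refs++;               /* share the mapping, not copy it */
                dst[i]->buf = src[i]->buf;
        }
        return 0;
}

Only once the clone succeeds does the function drop the source lock, take the destination lock, and install the staged io_rsrc_data by assignment, provided the destination table is still empty.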
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 48f712488f6b..569ea9ce1405 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -13,11 +13,6 @@ enum {
IORING_RSRC_BUFFER = 1,
};
-struct io_rsrc_data {
- unsigned int nr;
- struct io_rsrc_node **nodes;
-};
-
struct io_rsrc_node {
struct io_ring_ctx *ctx;
int refs;
@@ -50,6 +45,8 @@ struct io_imu_folio_data {
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
void io_free_rsrc_node(struct io_rsrc_node *node);
+void io_rsrc_data_free(struct io_rsrc_data *data);
+int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
int io_import_fixed(int ddir, struct iov_iter *iter,
struct io_mapped_ubuf *imu,
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 65491f4f2c7e..28fff18ebb19 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -339,10 +339,10 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
if (unlikely(ret))
return ret;
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ if (unlikely(req->buf_index >= ctx->buf_table.nr))
return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- node = ctx->user_bufs[index];
+ index = array_index_nospec(req->buf_index, ctx->buf_table.nr);
+ node = ctx->buf_table.nodes[index];
io_req_assign_rsrc_node(req, node);
io = req->async_data;
diff --git a/io_uring/splice.c b/io_uring/splice.c
index f78afb575ae6..aaaddb66e90a 100644
--- a/io_uring/splice.c
+++ b/io_uring/splice.c
@@ -66,10 +66,10 @@ static struct file *io_splice_get_file(struct io_kiocb *req,
return io_file_get_normal(req, sp->splice_fd_in);
io_ring_submit_lock(ctx, issue_flags);
- if (unlikely(sp->splice_fd_in >= ctx->nr_user_files))
+ if (unlikely(sp->splice_fd_in >= ctx->file_table.data.nr))
goto out;
- sp->splice_fd_in = array_index_nospec(sp->splice_fd_in, ctx->nr_user_files);
- node = ctx->file_table.nodes[sp->splice_fd_in];
+ sp->splice_fd_in = array_index_nospec(sp->splice_fd_in, ctx->file_table.data.nr);
+ node = ctx->file_table.data.nodes[sp->splice_fd_in];
if (node) {
node->refs++;
sp->rsrc_node = node;
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 0899c71008ae..17d5f5004702 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -212,15 +212,15 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
u16 index;
index = READ_ONCE(sqe->buf_index);
- if (unlikely(index >= ctx->nr_user_bufs))
+ if (unlikely(index >= ctx->buf_table.nr))
return -EFAULT;
- req->buf_index = array_index_nospec(index, ctx->nr_user_bufs);
+ req->buf_index = array_index_nospec(index, ctx->buf_table.nr);
/*
* Pin node upfront, prior to io_uring_cmd_import_fixed()
* being called. This prevents destruction of the mapped buffer
* we'll need at actual import time.
*/
- io_req_assign_rsrc_node(req, ctx->user_bufs[req->buf_index]);
+ io_req_assign_rsrc_node(req, ctx->buf_table.nodes[req->buf_index]);
}
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
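Finally, the uring_cmd.c hunk keeps the established rule that the buffer node is pinned at prep time, before io_uring_cmd_import_fixed() can run, so the mapped buffer cannot be unregistered and torn down between prep and import. A minimal model of the pin-early pattern, where req_assign_node() stands in for io_req_assign_rsrc_node():

struct node { int refs; };

/* Taking the reference at prep time means a concurrent unregister can
 * drop the table's reference without freeing the buffer under us. */
static void req_assign_node(struct node **slot, struct node *node)
{
        node->refs++;                              /* models io_req_assign_rsrc_node() */
        *slot = node;
}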