author     Kent Overstreet <koverstreet@google.com>    2013-05-31 15:23:53 -0700
committer  Kent Overstreet <koverstreet@google.com>    2013-06-17 19:39:45 -0700
commit     5be1ce5a6c33c74f0e1bffca7c215338be6411dc (patch)
tree       8cad5b53cf6e6fd793d9e4ff60fe309d72ad484c /block
parent     413156bc35c3d8eaa57e58a770f1c0c5d27a06e8 (diff)
block: Bio cancellation
If a bio is associated with a kiocb, allow it to be cancelled.
This is accomplished by adding a pointer to a kiocb in struct bio; when
we go to dequeue a request, we check whether its bios have been
cancelled - if so, we end the request with -ECANCELED.
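For orientation, here is a minimal sketch of the check the diff below
relies on. bio_cancelled()'s body is not part of this hunk; bi_iocb is
the kiocb pointer the commit message describes, while kiocb_cancelled()
is a hypothetical helper standing in for however io_cancel() marks the
kiocb:

/*
 * Sketch only, not from the patch: the diff below calls
 * bio_cancelled() but does not show its implementation.
 */
static inline bool bio_cancelled(struct bio *bio)
{
	/* A bio with no associated kiocb can never be cancelled. */
	return bio->bi_iocb && kiocb_cancelled(bio->bi_iocb);
}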
We don't currently try to cancel bios if IO has already been started -
that would require a per-bio callback function, and a way to find all
the outstanding bios for a given kiocb. Such a mechanism may or may not
be added in the future; this patch starts simple.
Currently this can only be triggered with aio and io_cancel(), but the
mechanism can be used for sync IO too.
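As a rough usage sketch (not part of the patch), the io_cancel()
trigger path from userspace would look something like the following;
the device path is a placeholder and error handling is minimal:

/*
 * Usage sketch: submit an O_DIRECT read via aio, then race
 * io_cancel() against dispatch.  Build with -laio.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <libaio.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd = open("/dev/sdX", O_RDONLY | O_DIRECT);	/* placeholder device */

	if (fd < 0 || io_setup(1, &ctx) || posix_memalign(&buf, 4096, 4096))
		exit(1);

	io_prep_pread(&cb, fd, buf, 4096, 0);
	if (io_submit(ctx, 1, cbs) != 1)
		exit(1);

	/*
	 * Inherently racy: with this patch the cancel can only take
	 * effect while the bio is still queued, i.e. before the
	 * request has been dispatched to the driver.
	 */
	if (io_cancel(ctx, &cb, &ev) == 0)
		printf("cancelled, res = %ld\n", (long)ev.res);

	io_destroy(ctx);
	return 0;
}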
It can also be used for bios created by stacking drivers, and bio clones
in general - when cloning a bio, if the bi_iocb pointer is copied as
well, the clone will then be cancellable. bio_clone() could be modified
to do this, but isn't in this patch because all the bio_clone() users
would need to be audited to make sure that it's safe. We can't blindly
make e.g. raid5 writes cancellable without the knowledge of the md code.
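For illustration only, the clone propagation deliberately left out of
this patch would amount to something like the following hypothetical
helper:

/*
 * Hypothetical, not in this patch: copying the kiocb pointer is all a
 * stacking driver would need to make its clones answer
 * bio_cancelled(), but per the commit message each bio_clone() caller
 * must be audited before doing this blindly.
 */
static void bio_clone_cancellation(struct bio *clone, struct bio *src)
{
	clone->bi_iocb = src->bi_iocb;
}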
Initial patch by Anatol Pomazau (anatol@google.com).
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 814e360dc007..07d46b9ef369 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
+#include <linux/aio.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -1744,6 +1745,11 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}

+	if (bio_cancelled(bio)) {
+		err = -ECANCELED;
+		goto end_io;
+	}
+
 	/*
 	 * Various block parts want %current->io_context and lazy ioc
 	 * allocation ends up trading a lot of pain for a small amount of
@@ -2079,6 +2085,20 @@ static inline struct request *blk_pm_peek_request(struct request_queue *q,
 }
 #endif

+static bool request_cancelled(struct request *rq)
+{
+	struct bio *bio;
+
+	if (!rq->bio)
+		return false;
+
+	for (bio = rq->bio; bio; bio = bio->bi_next)
+		if (!bio_cancelled(bio))
+			return false;
+
+	return true;
+}
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2124,6 +2144,12 @@ struct request *blk_peek_request(struct request_queue *q)
 			trace_block_rq_issue(q, rq);
 		}

+		if (request_cancelled(rq)) {
+			blk_start_request(rq);
+			__blk_end_request_all(rq, -ECANCELED);
+			continue;
+		}
+
 		if (!q->boundary_rq || q->boundary_rq == rq) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = NULL;
@@ -2308,6 +2334,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 			char *error_type;

 			switch (error) {
+			case -ECANCELED:
+				goto noerr;
 			case -ENOLINK:
 				error_type = "recoverable transport";
 				break;
@@ -2328,6 +2356,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 			       (unsigned long long)blk_rq_pos(req));
 		}

+noerr:
 		blk_account_io_completion(req, nr_bytes);
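Two details of the diff are worth noting. request_cancelled() reports a
request as cancelled only when every bio in it has been cancelled: a
request can carry merged bios from more than one source, and cancelling
one of them must not throw away the others' IO. And in
blk_update_request(), -ECANCELED jumps straight to the noerr label,
skipping the error_type reporting, so a deliberately cancelled request
is not logged as a device failure.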