diff options
author:    Kent Overstreet <kmo@daterainc.com>    2013-11-23 21:36:29 -0800
committer: Kent Overstreet <kmo@daterainc.com>    2014-02-26 15:34:42 -0800
commit:    faba25b68ad27aaa5ea73aacc01e5a3da2f5a8ae (patch)
tree:      a067ea6af791e9e90b7d3c6ffedfc07fc6552de0
parent:    645e0bd6c0b16e61e0fc428e4daf32e5ad33a005 (diff)
block: Make blk_queue_bounce() handle bios larger than BIO_MAX_PAGES
We'd like to eventually be able to handle bios with more than
BIO_MAX_PAGES segments; this shouldn't be too hard and it'll simplify
other code in the kernel.
The issue is code that clones the bio and must clone the biovec (i.e.
it can't use bio_clone_fast()) won't be able to allocate a bio with more
than BIO_MAX_PAGES - bio_alloc_bioset() always fails in that case.
Fortunately, it's easy to make blk_queue_bounce() just process part of
the bio if necessary, using bi_remaining to count the splits and punting
the rest back to generic_make_request().
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
-rw-r--r--  mm/bounce.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 52 insertions(+), 8 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index 523918b8c6dc..dafe17093c39 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -193,6 +193,43 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 }
 #endif /* CONFIG_NEED_BOUNCE_POOL */
 
+static struct bio *bio_clone_segments(struct bio *bio_src, gfp_t gfp_mask,
+				      struct bio_set *bs, unsigned nsegs)
+{
+	struct bvec_iter iter;
+	struct bio_vec bv;
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(gfp_mask, nsegs, bs);
+	if (!bio)
+		return NULL;
+
+	bio->bi_bdev			= bio_src->bi_bdev;
+	bio->bi_rw			= bio_src->bi_rw;
+	bio->bi_iter.bi_sector		= bio_src->bi_iter.bi_sector;
+
+	bio_for_each_segment(bv, bio_src, iter) {
+		bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		bio->bi_iter.bi_size += bv.bv_len;
+		if (!--nsegs)
+			break;
+	}
+
+	if (bio_integrity(bio_src)) {
+		int ret;
+
+		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+		if (ret < 0) {
+			bio_put(bio);
+			return NULL;
+		}
+	}
+
+	bio_src->bi_iter = iter;
+
+	return bio;
+}
+
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool, int force)
 {
@@ -200,17 +237,24 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, from;
 	struct bvec_iter iter;
-	unsigned i;
+	int i, nsegs = 0, bounce = force;
 
-	if (force)
-		goto bounce;
-	bio_for_each_segment(from, *bio_orig, iter)
+	bio_for_each_segment(from, *bio_orig, iter) {
+		nsegs++;
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
-			goto bounce;
+			bounce = 1;
+	}
+
+	if (!bounce)
+		return;
 
-	return;
-bounce:
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+	bio = bio_clone_segments(*bio_orig, GFP_NOIO, fs_bio_set,
+				 min(nsegs, BIO_MAX_PAGES));
+
+	if ((*bio_orig)->bi_iter.bi_size) {
+		atomic_inc(&(*bio_orig)->bi_remaining);
+		generic_make_request(*bio_orig);
+	}
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;