| author    | Jens Axboe <axboe@kernel.dk>                                  | 2021-08-27 16:32:01 -0600 |
| committer | Jens Axboe <axboe@kernel.dk>                                  | 2021-08-27 16:32:01 -0600 |
| commit    | 461d971215dfb55bcd5f7d040b2b222592040f95 (patch)              | |
| tree      | 2b1baab8e9462a35f47d16ca98cc937584e52f93 /drivers/md/raid1.c  | |
| parent    | 7ee656c3ac3d047b4cf1269f83ac9d6c0bba916b (diff)               | |
| parent    | 6607cd319b6b91bff94e90f798a61c031650b514 (diff)               | |
Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.15/drivers
Pull MD changes from Song.
* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
raid1: ensure write behind bio has less than BIO_MAX_VECS sectors
md/raid10: Remove unnecessary rcu_dereference in raid10_handle_discard
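For the first commit in that list, the cap comes down to simple arithmetic: alloc_behind_master_bio() copies the write payload one page at a time into a bio that can hold at most BIO_MAX_VECS page vectors, so a write-behind bio can cover at most BIO_MAX_VECS pages' worth of sectors. A minimal userspace sketch of that bound, with BIO_MAX_VECS and PAGE_SIZE mirrored as constants for illustration (in the kernel they come from headers, and PAGE_SIZE is architecture-dependent):

```c
#include <stdio.h>

/* Values mirrored from the kernel for illustration only. */
#define BIO_MAX_VECS 256   /* max bio_vec entries in one bio */
#define PAGE_SIZE    4096  /* common page size */
#define SECTOR_SHIFT 9     /* 512-byte sectors */

int main(void)
{
	/* Each bio_vec carries one copied page of write-behind data, so
	 * the payload is capped at BIO_MAX_VECS pages. Expressed in
	 * 512-byte sectors, exactly as in the patch below: */
	int cap = BIO_MAX_VECS * (PAGE_SIZE >> SECTOR_SHIFT);

	printf("write-behind cap: %d sectors (%d KiB)\n", cap, cap / 2);
	return 0;
}
```

With 4 KiB pages the cap works out to 2048 sectors, i.e. 1 MiB per write-behind bio; anything larger has to be split before taking the write-behind path.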
Diffstat (limited to 'drivers/md/raid1.c')
| -rw-r--r-- | drivers/md/raid1.c | 19 |
1 file changed, 19 insertions, 0 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 51f2547c2007..21b348408478 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1331,6 +1331,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	struct raid1_plug_cb *plug = NULL;
 	int first_clone;
 	int max_sectors;
+	bool write_behind = false;
 
 	if (mddev_is_clustered(mddev) &&
 	    md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1383,6 +1384,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	max_sectors = r1_bio->sectors;
 	for (i = 0; i < disks; i++) {
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
+		/*
+		 * The write-behind io is only attempted on drives marked as
+		 * write-mostly, which means we could allocate write behind
+		 * bio later.
+		 */
+		if (rdev && test_bit(WriteMostly, &rdev->flags))
+			write_behind = true;
+
 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 			atomic_inc(&rdev->nr_pending);
 			blocked_rdev = rdev;
@@ -1456,6 +1466,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		goto retry_write;
 	}
 
+	/*
+	 * When using a bitmap, we may call alloc_behind_master_bio below.
+	 * alloc_behind_master_bio allocates a copy of the data payload a page
+	 * at a time and thus needs a new bio that can fit the whole payload
+	 * this bio in page sized chunks.
+	 */
+	if (write_behind && bitmap)
+		max_sectors = min_t(int, max_sectors,
+				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
 					      GFP_NOIO, &conf->bio_split);
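Putting the hunks together: write_behind is latched while scanning the mirrors, and the new clamp runs just before the existing split check, so an oversized bio gets cut down by bio_split() and the remainder retried rather than overflowing the behind-master bio. A simplified, self-contained sketch of that decision, where clamp_write_behind() is a hypothetical stand-in for the patched section of raid1_write_request(), not a real kernel helper:

```c
#include <stdbool.h>
#include <stdio.h>

/* Values mirrored from the kernel for illustration only. */
#define BIO_MAX_VECS 256
#define PAGE_SIZE    4096

/* Hypothetical stand-in for the clamp the patch adds: apply the
 * write-behind cap only when a write-mostly device was seen and a
 * bitmap is active, i.e. only when alloc_behind_master_bio() might
 * actually run later. */
static int clamp_write_behind(int max_sectors, bool write_behind, bool bitmap)
{
	int cap = BIO_MAX_VECS * (PAGE_SIZE >> 9);

	if (write_behind && bitmap && max_sectors > cap)
		max_sectors = cap;
	return max_sectors;
}

int main(void)
{
	/* A 4 MiB write (8192 sectors) aimed at a write-mostly mirror
	 * with a bitmap is clamped to 2048 sectors; the caller would
	 * then split the bio and handle the remainder separately. */
	printf("%d\n", clamp_write_behind(8192, true, true));  /* 2048 */

	/* Without a write-mostly device the cap does not apply. */
	printf("%d\n", clamp_write_behind(8192, false, true)); /* 8192 */
	return 0;
}
```

Placing the clamp ahead of the existing `max_sectors < bio_sectors(bio)` test lets the patch reuse the split-and-retry machinery already in raid1_write_request() instead of adding a second splitting path.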