-rw-r--r-- | block/bfq-cgroup.c       |   1
-rw-r--r-- | block/bfq-iosched.c      |   6
-rw-r--r-- | block/blk-merge.c        |  35
-rw-r--r-- | block/blk-mq.c           |  13
-rw-r--r-- | block/blk-settings.c     | 141
-rw-r--r-- | block/blk-sysfs.c        |   6
-rw-r--r-- | block/genhd.c            |   9
-rw-r--r-- | drivers/block/loop.c     |  30
-rw-r--r-- | drivers/block/ublk_drv.c |   2
-rw-r--r-- | drivers/md/raid0.c       |   1
-rw-r--r-- | drivers/md/raid1.c       |  20
-rw-r--r-- | drivers/md/raid10.c      |  20
-rw-r--r-- | include/linux/blkdev.h   |  20
13 files changed, 227 insertions, 77 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index e831aedb4643..9fb9f3533150 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -736,6 +736,7 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
 		 */
 		bfq_put_cooperator(sync_bfqq);
 		bic_set_bfqq(bic, NULL, true, act_idx);
+		bfq_release_process_ref(bfqd, sync_bfqq);
 	}
 }
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0747d9d0e48c..28c2bb06e859 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5434,8 +5434,6 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
 		bfq_put_queue(__bfqq);
 		__bfqq = next;
 	}
-
-	bfq_release_process_ref(bfqq->bfqd, bfqq);
 }
 
 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -5448,6 +5446,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
 
 	bfq_put_cooperator(bfqq);
+
+	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
@@ -6734,6 +6734,8 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 	bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
 
 	bfq_put_cooperator(bfqq);
+
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 	return NULL;
 }
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e0b28e9298c9..e01383c6e534 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -864,17 +864,10 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req_op(req) != req_op(next))
 		return NULL;
 
-	if (rq_data_dir(req) != rq_data_dir(next))
+	if (req->bio->bi_write_hint != next->bio->bi_write_hint)
+		return NULL;
+	if (req->bio->bi_ioprio != next->bio->bi_ioprio)
 		return NULL;
-
-	if (req->bio && next->bio) {
-		/* Don't merge requests with different write hints. */
-		if (req->bio->bi_write_hint != next->bio->bi_write_hint)
-			return NULL;
-		if (req->bio->bi_ioprio != next->bio->bi_ioprio)
-			return NULL;
-	}
-
 	if (!blk_atomic_write_mergeable_rqs(req, next))
 		return NULL;
 
@@ -986,30 +979,16 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (req_op(rq) != bio_op(bio))
 		return false;
 
-	/* different data direction or already started, don't merge */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return false;
-
-	/* don't merge across cgroup boundaries */
 	if (!blk_cgroup_mergeable(rq, bio))
 		return false;
-
-	/* only merge integrity protected bio into ditto rq */
 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 		return false;
-
-	/* Only merge if the crypt contexts are compatible */
 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 		return false;
-
-	if (rq->bio) {
-		/* Don't merge requests with different write hints. */
-		if (rq->bio->bi_write_hint != bio->bi_write_hint)
-			return false;
-		if (rq->bio->bi_ioprio != bio->bi_ioprio)
-			return false;
-	}
-
+	if (rq->bio->bi_write_hint != bio->bi_write_hint)
+		return false;
+	if (rq->bio->bi_ioprio != bio->bi_ioprio)
+		return false;
 	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
 		return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 270cfd9fc6b0..abdf44736e08 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3273,19 +3273,21 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		int (*bio_ctr)(struct bio *, struct bio *, void *),
 		void *data)
 {
-	struct bio *bio, *bio_src;
+	struct bio *bio_src;
 
 	if (!bs)
 		bs = &fs_bio_set;
 
 	__rq_for_each_bio(bio_src, rq_src) {
-		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
-				      bs);
+		struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
+				gfp_mask, bs);
 		if (!bio)
 			goto free_and_out;
 
-		if (bio_ctr && bio_ctr(bio, bio_src, data))
+		if (bio_ctr && bio_ctr(bio, bio_src, data)) {
+			bio_put(bio);
 			goto free_and_out;
+		}
 
 		if (rq->bio) {
 			rq->biotail->bi_next = bio;
@@ -3293,7 +3295,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		} else {
 			rq->bio = rq->biotail = bio;
 		}
-		bio = NULL;
 	}
 
 	/* Copy attributes of the original request to the clone request. */
@@ -3311,8 +3312,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 	return 0;
 
 free_and_out:
-	if (bio)
-		bio_put(bio);
 	blk_rq_unprep_clone(rq);
 
 	return -ENOMEM;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f1d4dfdc37a7..8f09e33f41f6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -178,9 +178,26 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 	if (!lim->atomic_write_hw_max)
 		goto unsupported;
 
+	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
+		goto unsupported;
+
+	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
+		goto unsupported;
+
+	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
+			 lim->atomic_write_hw_unit_max))
+		goto unsupported;
+
+	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
+			 lim->atomic_write_hw_max))
+		goto unsupported;
+
 	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
 
 	if (boundary_sectors) {
+		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
+				 lim->atomic_write_hw_boundary))
+			goto unsupported;
 		/*
 		 * A feature of boundary support is that it disallows bios to
 		 * be merged which would result in a merged request which
@@ -249,6 +266,13 @@ int blk_validate_limits(struct queue_limits *lim)
 		lim->io_min = lim->physical_block_size;
 
 	/*
+	 * The optimal I/O size may not be aligned to physical block size
+	 * (because it may be limited by dma engines which have no clue about
+	 * block size of the disks attached to them), so we round it down here.
+	 */
+	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);
+
+	/*
 	 * max_hw_sectors has a somewhat weird default for historical reason,
 	 * but driver really should set their own instead of relying on this
 	 * value.
@@ -458,8 +482,6 @@ static unsigned int queue_limit_discard_alignment(
 	/* Why are these in bytes, not sectors? */
 	alignment = lim->discard_alignment >> SECTOR_SHIFT;
 	granularity = lim->discard_granularity >> SECTOR_SHIFT;
-	if (!granularity)
-		return 0;
 
 	/* Offset of the partition start in 'granularity' sectors */
 	offset = sector_div(sector, granularity);
@@ -479,6 +501,119 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb
 	return sectors;
 }
 
+/* Check if second and later bottom devices are compliant */
+static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/* We're not going to support different boundary sizes.. yet */
+	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
+		return false;
+
+	/* Can't support this */
+	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
+		return false;
+
+	/* Or this */
+	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
+		return false;
+
+	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
+				b->atomic_write_hw_max);
+	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
+				b->atomic_write_hw_unit_min);
+	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
+				b->atomic_write_hw_unit_max);
+	return true;
+}
+
+/* Check for valid boundary of first bottom device */
+static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/*
+	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
+	 * devices store chunk sectors in t->io_min.
+	 */
+	if (b->atomic_write_hw_boundary > t->io_min &&
+	    b->atomic_write_hw_boundary % t->io_min)
+		return false;
+	if (t->io_min > b->atomic_write_hw_boundary &&
+	    t->io_min % b->atomic_write_hw_boundary)
+		return false;
+
+	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
+	return true;
+}
+
+
+/* Check stacking of first bottom device */
+static bool blk_stack_atomic_writes_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	if (b->atomic_write_hw_boundary &&
+	    !blk_stack_atomic_writes_boundary_head(t, b))
+		return false;
+
+	if (t->io_min <= SECTOR_SIZE) {
+		/* No chunk sectors, so use bottom device values directly */
+		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
+		t->atomic_write_hw_max = b->atomic_write_hw_max;
+		return true;
+	}
+
+	/*
+	 * Find values for limits which work for chunk size.
+	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
+	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
+	 * So we need to find highest power-of-2 which works for the chunk
+	 * size.
+	 * As an example scenario, we could have b->unit_max = 16K and
+	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
+	 * aligned with both limits, i.e. 8K in this example.
+	 */
+	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+	while (t->io_min % t->atomic_write_hw_unit_max)
+		t->atomic_write_hw_unit_max /= 2;
+
+	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
+					  t->atomic_write_hw_unit_max);
+	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
+
+	return true;
+}
+
+static void blk_stack_atomic_writes_limits(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
+		goto unsupported;
+
+	if (!b->atomic_write_unit_min)
+		goto unsupported;
+
+	/*
+	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
+	 * device, so check for compliance.
+	 */
+	if (t->atomic_write_hw_max) {
+		if (!blk_stack_atomic_writes_tail(t, b))
+			goto unsupported;
+		return;
+	}
+
+	if (!blk_stack_atomic_writes_head(t, b))
+		goto unsupported;
+	return;
+
+unsupported:
+	t->atomic_write_hw_max = 0;
+	t->atomic_write_hw_unit_max = 0;
+	t->atomic_write_hw_unit_min = 0;
+	t->atomic_write_hw_boundary = 0;
+	t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
+}
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
@@ -639,6 +774,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		t->zone_write_granularity = 0;
 		t->max_zone_append_sectors = 0;
 	}
+	blk_stack_atomic_writes_limits(t, b);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index d80a202cd170..4241aea84161 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -810,10 +810,8 @@ int blk_register_queue(struct gendisk *disk)
 	 * faster to shut down and is made fully functional here as
 	 * request_queues for non-existent devices never get registered.
 	 */
-	if (!blk_queue_init_done(q)) {
-		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
-		percpu_ref_switch_to_percpu(&q->q_usage_counter);
-	}
+	blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
+	percpu_ref_switch_to_percpu(&q->q_usage_counter);
 
 	return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 9130e163e191..79230c109fca 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -742,13 +742,10 @@ void del_gendisk(struct gendisk *disk)
 	 * If the disk does not own the queue, allow using passthrough requests
 	 * again. Else leave the queue frozen to fail all I/O.
 	 */
-	if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
-		blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+	if (!test_bit(GD_OWNS_QUEUE, &disk->state))
 		__blk_mq_unfreeze_queue(q, true);
-	} else {
-		if (queue_is_mq(q))
-			blk_mq_exit_queue(q);
-	}
+	else if (queue_is_mq(q))
+		blk_mq_exit_queue(q);
 
 	if (start_drain)
 		blk_unfreeze_release_lock(q, true, queue_dying);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fe9bb4fb5f1b..8f6761c27c68 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -770,12 +770,11 @@ static void loop_sysfs_exit(struct loop_device *lo)
 				   &loop_attribute_group);
 }
 
-static void loop_config_discard(struct loop_device *lo,
-		struct queue_limits *lim)
+static void loop_get_discard_config(struct loop_device *lo,
+		u32 *granularity, u32 *max_discard_sectors)
 {
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
-	u32 granularity = 0, max_discard_sectors = 0;
 	struct kstatfs sbuf;
 
 	/*
@@ -788,24 +787,17 @@ static void loop_config_discard(struct loop_device *lo,
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
 
-		max_discard_sectors = bdev_write_zeroes_sectors(bdev);
-		granularity = bdev_discard_granularity(bdev);
+		*max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+		*granularity = bdev_discard_granularity(bdev);
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
 	 * image a.k.a. discard.
 	 */
 	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
-		max_discard_sectors = UINT_MAX >> 9;
-		granularity = sbuf.f_bsize;
+		*max_discard_sectors = UINT_MAX >> 9;
+		*granularity = sbuf.f_bsize;
 	}
-
-	lim->max_hw_discard_sectors = max_discard_sectors;
-	lim->max_write_zeroes_sectors = max_discard_sectors;
-	if (max_discard_sectors)
-		lim->discard_granularity = granularity;
-	else
-		lim->discard_granularity = 0;
 }
 
 struct loop_worker {
@@ -991,6 +983,7 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
 	struct inode *inode = file->f_mapping->host;
 	struct block_device *backing_bdev = NULL;
 	struct queue_limits lim;
+	u32 granularity = 0, max_discard_sectors = 0;
 
 	if (S_ISBLK(inode->i_mode))
 		backing_bdev = I_BDEV(inode);
@@ -1000,6 +993,8 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
 	if (!bsize)
 		bsize = loop_default_blocksize(lo, backing_bdev);
 
+	loop_get_discard_config(lo, &granularity, &max_discard_sectors);
+
 	lim = queue_limits_start_update(lo->lo_queue);
 	lim.logical_block_size = bsize;
 	lim.physical_block_size = bsize;
@@ -1009,7 +1004,12 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned int bsize)
 		lim.features |= BLK_FEAT_WRITE_CACHE;
 	if (backing_bdev && !bdev_nonrot(backing_bdev))
 		lim.features |= BLK_FEAT_ROTATIONAL;
-	loop_config_discard(lo, &lim);
+	lim.max_hw_discard_sectors = max_discard_sectors;
+	lim.max_write_zeroes_sectors = max_discard_sectors;
+	if (max_discard_sectors)
+		lim.discard_granularity = granularity;
+	else
+		lim.discard_granularity = 0;
 
 	return queue_limits_commit_update(lo->lo_queue, &lim);
 }
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c6d18cd8af44..d4aed12dd436 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -3041,7 +3041,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 		ret = ublk_ctrl_end_recovery(ub, cmd);
 		break;
 	default:
-		ret = -ENOTSUPP;
+		ret = -EOPNOTSUPP;
 		break;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index baaf5f8b80ae..7049ec7fb8eb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,6 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
 	lim.max_write_zeroes_sectors = mddev->chunk_sectors;
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * mddev->raid_disks;
+	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err) {
 		queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a5adf08ee174..519c56f0ee3d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1571,7 +1571,21 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				continue;
 			}
 			if (is_bad) {
-				int good_sectors = first_bad - r1_bio->sector;
+				int good_sectors;
+
+				/*
+				 * We cannot atomically write this, so just
+				 * error in that case. It could be possible to
+				 * atomically write other mirrors, but the
+				 * complexity of supporting that is not worth
+				 * the benefit.
+				 */
+				if (bio->bi_opf & REQ_ATOMIC) {
+					error = -EIO;
+					goto err_handle;
+				}
+
+				good_sectors = first_bad - r1_bio->sector;
 				if (good_sectors < max_sectors)
 					max_sectors = good_sectors;
 			}
@@ -1657,7 +1671,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
+		mbio->bi_opf = bio_op(bio) |
+			(bio->bi_opf & (REQ_SYNC | REQ_FUA | REQ_ATOMIC));
 		if (test_bit(FailFast, &rdev->flags) &&
 		    !test_bit(WriteMostly, &rdev->flags) &&
 		    conf->raid_disks - mddev->degraded > 1)
@@ -3224,6 +3239,7 @@ static int raid1_set_limits(struct mddev *mddev)
 
 	md_init_stacking_limits(&lim);
 	lim.max_write_zeroes_sectors = 0;
+	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err) {
 		queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8c7f5daa073a..f779d8225667 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1255,6 +1255,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	const enum req_op op = bio_op(bio);
 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
 	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
+	const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
 	unsigned long flags;
 	struct r10conf *conf = mddev->private;
 	struct md_rdev *rdev;
@@ -1273,7 +1274,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
 				   choose_data_offset(r10_bio, rdev));
 	mbio->bi_end_io	= raid10_end_write_request;
-	mbio->bi_opf = op | do_sync | do_fua;
+	mbio->bi_opf = op | do_sync | do_fua | do_atomic;
 	if (!replacement && test_bit(FailFast,
 				     &conf->mirrors[devnum].rdev->flags)
 			 && enough(conf, devnum))
@@ -1468,7 +1469,21 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 				continue;
 			}
 			if (is_bad) {
-				int good_sectors = first_bad - dev_sector;
+				int good_sectors;
+
+				/*
+				 * We cannot atomically write this, so just
+				 * error in that case. It could be possible to
+				 * atomically write other mirrors, but the
+				 * complexity of supporting that is not worth
+				 * the benefit.
+				 */
+				if (bio->bi_opf & REQ_ATOMIC) {
+					error = -EIO;
+					goto err_handle;
+				}
+
+				good_sectors = first_bad - dev_sector;
 				if (good_sectors < max_sectors)
 					max_sectors = good_sectors;
 			}
@@ -4025,6 +4040,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
 	lim.max_write_zeroes_sectors = 0;
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+	lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
 	err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
 	if (err) {
 		queue_limits_cancel_update(mddev->gendisk->queue);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a1fd0ddce5cf..08a727b40816 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -333,6 +333,10 @@ typedef unsigned int __bitwise blk_features_t;
 #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
 	((__force blk_features_t)(1u << 15))
 
+/* stacked device can/does support atomic writes */
+#define BLK_FEAT_ATOMIC_WRITES_STACKED \
+	((__force blk_features_t)(1u << 16))
+
 /*
  * Flags automatically inherited when stacking limits.
  */
@@ -775,13 +779,13 @@ static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
 	atomic_andnot(flag, &bdev->__bd_flags);
 }
 
-static inline int get_disk_ro(struct gendisk *disk)
+static inline bool get_disk_ro(struct gendisk *disk)
 {
 	return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
 		test_bit(GD_READ_ONLY, &disk->state);
 }
 
-static inline int bdev_read_only(struct block_device *bdev)
+static inline bool bdev_read_only(struct block_device *bdev)
 {
 	return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
 }
@@ -1261,7 +1265,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q)
 	return q->limits.io_min;
 }
 
-static inline int bdev_io_min(struct block_device *bdev)
+static inline unsigned int bdev_io_min(struct block_device *bdev)
 {
 	return queue_io_min(bdev_get_queue(bdev));
 }
@@ -1271,7 +1275,7 @@ static inline unsigned int queue_io_opt(const struct request_queue *q)
 	return q->limits.io_opt;
 }
 
-static inline int bdev_io_opt(struct block_device *bdev)
+static inline unsigned int bdev_io_opt(struct block_device *bdev)
 {
 	return queue_io_opt(bdev_get_queue(bdev));
 }
@@ -1417,7 +1421,7 @@ static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
 	return is_seq;
 }
 
-static inline int queue_dma_alignment(const struct request_queue *q)
+static inline unsigned int queue_dma_alignment(const struct request_queue *q)
 {
 	return q->limits.dma_alignment;
 }
@@ -1458,12 +1462,13 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev,
 				   bdev_logical_block_size(bdev) - 1);
 }
 
-static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
+static inline unsigned int
+blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
 {
 	return lim->dma_alignment | lim->dma_pad_mask;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
+static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
@@ -1581,7 +1586,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
 	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
 }
 
-int bdev_read_only(struct block_device *bdev);
 int set_blocksize(struct file *file, int size);
 int lookup_bdev(const char *pathname, dev_t *dev);
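
As an aside (not part of the patch), the chunk-size handling in blk_stack_atomic_writes_head() above can be checked with a small standalone sketch. It mirrors only the power-of-2 reduction loop that shrinks atomic_write_hw_unit_max until it divides the chunk size (t->io_min), using the 16K/24K example from the patch comment; the fit_unit_max() helper name is invented here for illustration.

/*
 * Illustration only: reproduce the reduction
 *   while (t->io_min % t->atomic_write_hw_unit_max)
 *           t->atomic_write_hw_unit_max /= 2;
 * outside the kernel. unit_max is assumed to be a power of two, as the
 * new blk_validate_atomic_write_limits() checks enforce.
 */
#include <stdio.h>

static unsigned int fit_unit_max(unsigned int unit_max, unsigned int io_min)
{
	/* Halve unit_max until it evenly divides the chunk size. */
	while (io_min % unit_max)
		unit_max /= 2;
	return unit_max;
}

int main(void)
{
	/* Example from the patch comment: 16K unit_max, 24K chunk -> 8K. */
	printf("%u\n", fit_unit_max(16 * 1024, 24 * 1024));
	return 0;
}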