Add block provisioning support for device-mapper targets.

dm-crypt, dm-snap and dm-linear will, by default, pass
REQ_OP_PROVISION requests through to the underlying device, if it
supports them. dm-snap additionally completes a provision request
immediately when the target chunk already has an exception, since
that chunk is already remapped to allocated space in the COW store.

Signed-off-by: Sarthak Kukreti <sarthakkukreti@chromium.org>
---
drivers/md/dm-crypt.c | 4 +++-
drivers/md/dm-linear.c | 1 +
drivers/md/dm-snap.c | 7 +++++++
drivers/md/dm-table.c | 23 +++++++++++++++++++++++
drivers/md/dm.c | 6 ++++++
include/linux/device-mapper.h | 18 ++++++++++++++++++
6 files changed, 58 insertions(+), 1 deletion(-)
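
As a sketch for reviewers: a hypothetical bio-based target would opt
in to provisioning through the new dm_target fields like so. The
target constructor below is illustrative only and not part of this
patch:

    #include <linux/device-mapper.h>

    static int example_ctr(struct dm_target *ti, unsigned int argc,
                           char **argv)
    {
            /* Ask dm core to send one REQ_OP_PROVISION bio to this
             * target per provision request. */
            ti->num_provision_bios = 1;

            /* Receive provision bios even when the underlying devices
             * do not advertise support, e.g. a target that satisfies
             * the request itself. */
            ti->provision_supported = true;

            /* Have dm core split provision bios on the queue's
             * max_provision_sectors boundary. */
            ti->max_provision_granularity = true;

            return 0;
    }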
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3336,6 +3336,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tag_pool_max_sectors <<= cc->sector_shift;
}
+ ti->num_provision_bios = 1;
+
ret = -ENOMEM;
cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
if (!cc->io_queue) {
@@ -3390,7 +3392,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
- bio_op(bio) == REQ_OP_DISCARD)) {
+ bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_PROVISION)) {
bio_set_dev(bio, cc->dev->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -62,6 +62,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_discard_bios = 1;
ti->num_secure_erase_bios = 1;
ti->num_write_zeroes_bios = 1;
+ ti->num_provision_bios = 1;
ti->private = lc;
return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1358,6 +1358,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (s->discard_zeroes_cow)
ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
+ ti->num_provision_bios = 1;
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -2003,6 +2004,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/* If the block is already remapped - use that, else remap it */
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
+ if (unlikely(bio_op(bio) == REQ_OP_PROVISION)) {
+ bio_endio(bio);
+ r = DM_MAPIO_SUBMITTED;
+ goto out_unlock;
+ }
remap_exception(s, e, bio, chunk);
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
io_overlaps_chunk(s, bio)) {
@@ -2413,6 +2419,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
/* All discards are split on chunk_size boundary */
limits->discard_granularity = snap->store->chunk_size;
limits->max_discard_sectors = snap->store->chunk_size;
+ limits->max_provision_sectors = snap->store->chunk_size;
up_read(&_origins_lock);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1845,6 +1845,26 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
+static int device_provision_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ return bdev_max_provision_sectors(dev->bdev);
+}
+
+static bool dm_table_supports_provision(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (ti->provision_supported ||
+ (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_provision_capable, NULL)))
+ return true;
+ }
+
+ return false;
+}
+
static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1978,6 +1998,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (!dm_table_supports_write_zeroes(t))
q->limits.max_write_zeroes_sectors = 0;
+ if (!dm_table_supports_provision(t))
+ q->limits.max_provision_sectors = 0;
+
dm_table_verify_integrity(t);
/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1609,6 +1609,7 @@ static bool is_abnormal_io(struct bio *bio)
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_PROVISION:
return true;
default:
break;
@@ -1641,6 +1642,11 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
if (ti->max_write_zeroes_granularity)
max_granularity = limits->max_write_zeroes_sectors;
break;
+ case REQ_OP_PROVISION:
+ num_bios = ti->num_provision_bios;
+ if (ti->max_provision_granularity)
+ max_granularity = limits->max_provision_sectors;
+ break;
default:
break;
}
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -334,6 +334,12 @@ struct dm_target {
*/
unsigned int num_write_zeroes_bios;
+ /*
+ * The number of PROVISION bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned int num_provision_bios;
+
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
@@ -358,6 +364,12 @@ struct dm_target {
*/
bool discards_supported:1;
+ /*
+ * Set if this target needs to receive provision requests regardless of
+ * whether or not its underlying devices have support.
+ */
+ bool provision_supported:1;
+
/*
* Set if this target requires that discards be split on
* 'max_discard_sectors' boundaries.
@@ -376,6 +388,12 @@ struct dm_target {
*/
bool max_write_zeroes_granularity:1;
+ /*
+ * Set if this target requires that provision requests be split on
+ * 'max_provision_sectors' boundaries.
+ */
+ bool max_provision_granularity:1;
+
/*
* Set if we need to limit the number of in-flight bios when swapping.
*/
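
As a usage sketch: assuming the block-layer side of this series maps
a plain fallocate(2) (mode 0) on a block device to REQ_OP_PROVISION,
userspace can provision a range of a dm device as below. The device
path is illustrative, and the exact fallocate mode handling lives
outside this patch:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Provision (preallocate) the first 1 GiB of the device;
             * dm passes the resulting REQ_OP_PROVISION bios down to
             * the underlying device(s). */
            int fd = open("/dev/mapper/example", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (fallocate(fd, 0, 0, 1ULL << 30) < 0)
                    perror("fallocate");
            close(fd);
            return 0;
    }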