[v17,03/12] block: add copy offload support
Commit Message
Introduce blkdev_copy_offload to perform copy offload.
A REQ_OP_COPY_SRC bio is issued with the source information while a plug is
held. It flows down to the request layer, where a request is formed that
waits for the dst bio to arrive.
A REQ_OP_COPY_DST bio is then issued with the destination information; when
it reaches the request layer it is merged with the src request.
If, for any reason, a request reaches the driver with only one of the
src/dst bios, the copy offload is failed.
Larger copies are split into chunks based on the max_copy_sectors limit.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
---
block/blk-lib.c | 204 +++++++++++++++++++++++++++++++++++++++++
include/linux/blkdev.h | 4 +
2 files changed, 208 insertions(+)
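For illustration, here is a minimal sketch of a synchronous caller of the new
helper; the bdev, offsets and length are hypothetical and error handling is
reduced to a print:

	/*
	 * Hypothetical synchronous use: copy 1 MiB from offset 0 to offset
	 * 8 MiB on the same bdev. With endio == NULL the call blocks and
	 * returns the number of bytes copied or a negative error.
	 */
	ssize_t ret = blkdev_copy_offload(bdev, 0, 8 << 20, 1 << 20,
					  NULL, NULL, GFP_KERNEL);
	if (ret < 0)
		pr_err("copy offload failed: %zd\n", ret);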
Comments
Hi Nitesh,
kernel test robot noticed the following build warnings:
url: https://github.com/intel-lab-lkp/linux/commits/Nitesh-Shetty/block-Introduce-queue-limits-and-sysfs-for-copy-offload-support/20231019-200658
base: 213f891525c222e8ed145ce1ce7ae1f47921cb9c
patch link: https://lore.kernel.org/r/20231019110147.31672-4-nj.shetty%40samsung.com
patch subject: [PATCH v17 03/12] block: add copy offload support
config: i386-randconfig-141-20231022 (https://download.01.org/0day-ci/archive/20231025/202310251059.GiTmwLYx-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce: (https://download.01.org/0day-ci/archive/20231025/202310251059.GiTmwLYx-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202310251059.GiTmwLYx-lkp@intel.com/
New smatch warnings:
block/blk-lib.c:248 blkdev_copy_offload() warn: use 'gfp' here instead of GFP_KERNEL?
Old smatch warnings:
block/blk-lib.c:264 blkdev_copy_offload() warn: use 'gfp' here instead of GFP_KERNEL?
vim +/gfp +248 block/blk-lib.c
391929a15e2c18 Nitesh Shetty 2023-10-19 228 ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
391929a15e2c18 Nitesh Shetty 2023-10-19 229 loff_t pos_out, size_t len,
391929a15e2c18 Nitesh Shetty 2023-10-19 230 void (*endio)(void *, int, ssize_t),
391929a15e2c18 Nitesh Shetty 2023-10-19 231 void *private, gfp_t gfp)
391929a15e2c18 Nitesh Shetty 2023-10-19 232 {
391929a15e2c18 Nitesh Shetty 2023-10-19 233 struct blkdev_copy_io *cio;
391929a15e2c18 Nitesh Shetty 2023-10-19 234 struct blkdev_copy_offload_io *offload_io;
391929a15e2c18 Nitesh Shetty 2023-10-19 235 struct bio *src_bio, *dst_bio;
391929a15e2c18 Nitesh Shetty 2023-10-19 236 size_t rem, chunk;
391929a15e2c18 Nitesh Shetty 2023-10-19 237 size_t max_copy_bytes = bdev_max_copy_sectors(bdev) << SECTOR_SHIFT;
391929a15e2c18 Nitesh Shetty 2023-10-19 238 ssize_t ret;
391929a15e2c18 Nitesh Shetty 2023-10-19 239 struct blk_plug plug;
391929a15e2c18 Nitesh Shetty 2023-10-19 240
391929a15e2c18 Nitesh Shetty 2023-10-19 241 if (!max_copy_bytes)
391929a15e2c18 Nitesh Shetty 2023-10-19 242 return -EOPNOTSUPP;
391929a15e2c18 Nitesh Shetty 2023-10-19 243
391929a15e2c18 Nitesh Shetty 2023-10-19 244 ret = blkdev_copy_sanity_check(bdev, pos_in, bdev, pos_out, len);
391929a15e2c18 Nitesh Shetty 2023-10-19 245 if (ret)
391929a15e2c18 Nitesh Shetty 2023-10-19 246 return ret;
391929a15e2c18 Nitesh Shetty 2023-10-19 247
391929a15e2c18 Nitesh Shetty 2023-10-19 @248 cio = kzalloc(sizeof(*cio), GFP_KERNEL);
Should this be: cio = kzalloc(sizeof(*cio), gfp);? It's not totally
clear from the context honestly. (I haven't looked at the code outside
what is in this automated email).
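If the intent is indeed to honour the caller's flags, the change smatch is
suggesting would presumably be, for both allocations flagged above:

	cio = kzalloc(sizeof(*cio), gfp);
	offload_io = kzalloc(sizeof(*offload_io), gfp);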
391929a15e2c18 Nitesh Shetty 2023-10-19 249 if (!cio)
391929a15e2c18 Nitesh Shetty 2023-10-19 250 return -ENOMEM;
391929a15e2c18 Nitesh Shetty 2023-10-19 251 atomic_set(&cio->refcount, 1);
391929a15e2c18 Nitesh Shetty 2023-10-19 252 cio->waiter = current;
391929a15e2c18 Nitesh Shetty 2023-10-19 253 cio->endio = endio;
391929a15e2c18 Nitesh Shetty 2023-10-19 254 cio->private = private;
391929a15e2c18 Nitesh Shetty 2023-10-19 255
391929a15e2c18 Nitesh Shetty 2023-10-19 256 /*
391929a15e2c18 Nitesh Shetty 2023-10-19 257 * If there is a error, copied will be set to least successfully
391929a15e2c18 Nitesh Shetty 2023-10-19 258 * completed copied length
391929a15e2c18 Nitesh Shetty 2023-10-19 259 */
391929a15e2c18 Nitesh Shetty 2023-10-19 260 cio->copied = len;
391929a15e2c18 Nitesh Shetty 2023-10-19 261 for (rem = len; rem > 0; rem -= chunk) {
391929a15e2c18 Nitesh Shetty 2023-10-19 262 chunk = min(rem, max_copy_bytes);
391929a15e2c18 Nitesh Shetty 2023-10-19 263
391929a15e2c18 Nitesh Shetty 2023-10-19 264 offload_io = kzalloc(sizeof(*offload_io), GFP_KERNEL);
391929a15e2c18 Nitesh Shetty 2023-10-19 265 if (!offload_io)
391929a15e2c18 Nitesh Shetty 2023-10-19 266 goto err_free_cio;
391929a15e2c18 Nitesh Shetty 2023-10-19 267 offload_io->cio = cio;
391929a15e2c18 Nitesh Shetty 2023-10-19 268 /*
391929a15e2c18 Nitesh Shetty 2023-10-19 269 * For partial completion, we use offload_io->offset to truncate
391929a15e2c18 Nitesh Shetty 2023-10-19 270 * successful copy length
391929a15e2c18 Nitesh Shetty 2023-10-19 271 */
391929a15e2c18 Nitesh Shetty 2023-10-19 272 offload_io->offset = len - rem;
391929a15e2c18 Nitesh Shetty 2023-10-19 273
391929a15e2c18 Nitesh Shetty 2023-10-19 274 src_bio = bio_alloc(bdev, 0, REQ_OP_COPY_SRC, gfp);
391929a15e2c18 Nitesh Shetty 2023-10-19 275 if (!src_bio)
391929a15e2c18 Nitesh Shetty 2023-10-19 276 goto err_free_offload_io;
391929a15e2c18 Nitesh Shetty 2023-10-19 277 src_bio->bi_iter.bi_size = chunk;
391929a15e2c18 Nitesh Shetty 2023-10-19 278 src_bio->bi_iter.bi_sector = pos_in >> SECTOR_SHIFT;
391929a15e2c18 Nitesh Shetty 2023-10-19 279
391929a15e2c18 Nitesh Shetty 2023-10-19 280 blk_start_plug(&plug);
391929a15e2c18 Nitesh Shetty 2023-10-19 281 dst_bio = blk_next_bio(src_bio, bdev, 0, REQ_OP_COPY_DST, gfp);
391929a15e2c18 Nitesh Shetty 2023-10-19 282 if (!dst_bio)
391929a15e2c18 Nitesh Shetty 2023-10-19 283 goto err_free_src_bio;
391929a15e2c18 Nitesh Shetty 2023-10-19 284 dst_bio->bi_iter.bi_size = chunk;
391929a15e2c18 Nitesh Shetty 2023-10-19 285 dst_bio->bi_iter.bi_sector = pos_out >> SECTOR_SHIFT;
391929a15e2c18 Nitesh Shetty 2023-10-19 286 dst_bio->bi_end_io = blkdev_copy_offload_dst_endio;
391929a15e2c18 Nitesh Shetty 2023-10-19 287 dst_bio->bi_private = offload_io;
391929a15e2c18 Nitesh Shetty 2023-10-19 288
391929a15e2c18 Nitesh Shetty 2023-10-19 289 atomic_inc(&cio->refcount);
391929a15e2c18 Nitesh Shetty 2023-10-19 290 submit_bio(dst_bio);
391929a15e2c18 Nitesh Shetty 2023-10-19 291 blk_finish_plug(&plug);
391929a15e2c18 Nitesh Shetty 2023-10-19 292 pos_in += chunk;
391929a15e2c18 Nitesh Shetty 2023-10-19 293 pos_out += chunk;
391929a15e2c18 Nitesh Shetty 2023-10-19 294 }
391929a15e2c18 Nitesh Shetty 2023-10-19 295
391929a15e2c18 Nitesh Shetty 2023-10-19 296 if (atomic_dec_and_test(&cio->refcount))
391929a15e2c18 Nitesh Shetty 2023-10-19 297 blkdev_copy_endio(cio);
391929a15e2c18 Nitesh Shetty 2023-10-19 298 if (endio)
391929a15e2c18 Nitesh Shetty 2023-10-19 299 return -EIOCBQUEUED;
391929a15e2c18 Nitesh Shetty 2023-10-19 300
391929a15e2c18 Nitesh Shetty 2023-10-19 301 return blkdev_copy_wait_for_completion_io(cio);
391929a15e2c18 Nitesh Shetty 2023-10-19 302
391929a15e2c18 Nitesh Shetty 2023-10-19 303 err_free_src_bio:
391929a15e2c18 Nitesh Shetty 2023-10-19 304 bio_put(src_bio);
391929a15e2c18 Nitesh Shetty 2023-10-19 305 err_free_offload_io:
391929a15e2c18 Nitesh Shetty 2023-10-19 306 kfree(offload_io);
391929a15e2c18 Nitesh Shetty 2023-10-19 307 err_free_cio:
391929a15e2c18 Nitesh Shetty 2023-10-19 308 cio->copied = min_t(ssize_t, cio->copied, (len - rem));
391929a15e2c18 Nitesh Shetty 2023-10-19 309 cio->status = -ENOMEM;
391929a15e2c18 Nitesh Shetty 2023-10-19 310 if (rem == len) {
391929a15e2c18 Nitesh Shetty 2023-10-19 311 ret = cio->status;
391929a15e2c18 Nitesh Shetty 2023-10-19 312 kfree(cio);
391929a15e2c18 Nitesh Shetty 2023-10-19 313 return ret;
391929a15e2c18 Nitesh Shetty 2023-10-19 314 }
391929a15e2c18 Nitesh Shetty 2023-10-19 315 if (cio->endio)
391929a15e2c18 Nitesh Shetty 2023-10-19 316 return cio->status;
391929a15e2c18 Nitesh Shetty 2023-10-19 317
391929a15e2c18 Nitesh Shetty 2023-10-19 318 return blkdev_copy_wait_for_completion_io(cio);
391929a15e2c18 Nitesh Shetty 2023-10-19 319 }
@@ -10,6 +10,22 @@
#include "blk.h"
+/* Keeps track of all outstanding copy IO */
+struct blkdev_copy_io {
+ atomic_t refcount;
+ ssize_t copied;
+ int status;
+ struct task_struct *waiter;
+ void (*endio)(void *private, int status, ssize_t copied);
+ void *private;
+};
+
+/* Keeps track of single outstanding copy offload IO */
+struct blkdev_copy_offload_io {
+ struct blkdev_copy_io *cio;
+ loff_t offset;
+};
+
static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
unsigned int discard_granularity = bdev_discard_granularity(bdev);
@@ -115,6 +131,194 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL(blkdev_issue_discard);
+static inline ssize_t blkdev_copy_sanity_check(struct block_device *bdev_in,
+ loff_t pos_in,
+ struct block_device *bdev_out,
+ loff_t pos_out, size_t len)
+{
+ unsigned int align = max(bdev_logical_block_size(bdev_out),
+ bdev_logical_block_size(bdev_in)) - 1;
+
+ if ((pos_in & align) || (pos_out & align) || (len & align) || !len ||
+ len >= BLK_COPY_MAX_BYTES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline void blkdev_copy_endio(struct blkdev_copy_io *cio)
+{
+ if (cio->endio) {
+ cio->endio(cio->private, cio->status, cio->copied);
+ kfree(cio);
+ } else {
+ struct task_struct *waiter = cio->waiter;
+
+ WRITE_ONCE(cio->waiter, NULL);
+ blk_wake_io_task(waiter);
+ }
+}
+
+/*
+ * This must only be called once all bios have been issued so that the refcount
+ * can only decrease. This just waits for all bios to complete.
+ * Returns the number of bytes copied or an error
+ */
+static ssize_t blkdev_copy_wait_for_completion_io(struct blkdev_copy_io *cio)
+{
+ ssize_t ret;
+
+ for (;;) {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!READ_ONCE(cio->waiter))
+ break;
+ blk_io_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ ret = cio->copied;
+ kfree(cio);
+
+ return ret;
+}
+
+static void blkdev_copy_offload_dst_endio(struct bio *bio)
+{
+ struct blkdev_copy_offload_io *offload_io = bio->bi_private;
+ struct blkdev_copy_io *cio = offload_io->cio;
+
+ if (bio->bi_status) {
+ cio->copied = min_t(ssize_t, offload_io->offset, cio->copied);
+ if (!cio->status)
+ cio->status = blk_status_to_errno(bio->bi_status);
+ }
+ bio_put(bio);
+ kfree(offload_io);
+
+ if (atomic_dec_and_test(&cio->refcount))
+ blkdev_copy_endio(cio);
+}
+
+/*
+ * @bdev: block device
+ * @pos_in: source offset
+ * @pos_out: destination offset
+ * @len: length in bytes to be copied
+ * @endio: endio function to be called on completion of copy operation,
+ * for synchronous operation this should be NULL
+ * @private: endio function will be called with this private data,
+ * for synchronous operation this should be NULL
+ * @gfp: memory allocation flags (for bio_alloc)
+ *
+ * For a synchronous operation, returns the number of bytes copied or an error
+ * For an asynchronous operation, returns -EIOCBQUEUED or an error
+ *
+ * Description:
+ * Copy a source offset to a destination offset within the block device,
+ * using the device's native copy offload feature.
+ * The copy operation is performed using two bios.
+ * 1. A plug is taken and a REQ_OP_COPY_SRC bio is sent with the source
+ * sector and length. Once this bio reaches the request layer, a request
+ * is formed that waits for the dst bio to arrive.
+ * 2. A REQ_OP_COPY_DST bio is issued with the destination sector and length.
+ * Once this bio reaches the request layer and finds the request carrying
+ * the previously sent source info, the destination bio is merged into it.
+ * 3. The plug is released and the merged request is sent to the driver.
+ * This design works only for drivers with a request queue.
+ */
+ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
+ loff_t pos_out, size_t len,
+ void (*endio)(void *, int, ssize_t),
+ void *private, gfp_t gfp)
+{
+ struct blkdev_copy_io *cio;
+ struct blkdev_copy_offload_io *offload_io;
+ struct bio *src_bio, *dst_bio;
+ size_t rem, chunk;
+ size_t max_copy_bytes = bdev_max_copy_sectors(bdev) << SECTOR_SHIFT;
+ ssize_t ret;
+ struct blk_plug plug;
+
+ if (!max_copy_bytes)
+ return -EOPNOTSUPP;
+
+ ret = blkdev_copy_sanity_check(bdev, pos_in, bdev, pos_out, len);
+ if (ret)
+ return ret;
+
+ cio = kzalloc(sizeof(*cio), GFP_KERNEL);
+ if (!cio)
+ return -ENOMEM;
+ atomic_set(&cio->refcount, 1);
+ cio->waiter = current;
+ cio->endio = endio;
+ cio->private = private;
+
+	/*
+	 * If there is an error, copied will be set to the length that was
+	 * successfully copied before the first failing chunk
+	 */
+ cio->copied = len;
+ for (rem = len; rem > 0; rem -= chunk) {
+ chunk = min(rem, max_copy_bytes);
+
+ offload_io = kzalloc(sizeof(*offload_io), GFP_KERNEL);
+ if (!offload_io)
+ goto err_free_cio;
+ offload_io->cio = cio;
+		/*
+		 * On partial completion, offload_io->offset is used to truncate
+		 * the successfully copied length
+		 */
+ offload_io->offset = len - rem;
+
+ src_bio = bio_alloc(bdev, 0, REQ_OP_COPY_SRC, gfp);
+ if (!src_bio)
+ goto err_free_offload_io;
+ src_bio->bi_iter.bi_size = chunk;
+ src_bio->bi_iter.bi_sector = pos_in >> SECTOR_SHIFT;
+
+ blk_start_plug(&plug);
+ dst_bio = blk_next_bio(src_bio, bdev, 0, REQ_OP_COPY_DST, gfp);
+ if (!dst_bio)
+ goto err_free_src_bio;
+ dst_bio->bi_iter.bi_size = chunk;
+ dst_bio->bi_iter.bi_sector = pos_out >> SECTOR_SHIFT;
+ dst_bio->bi_end_io = blkdev_copy_offload_dst_endio;
+ dst_bio->bi_private = offload_io;
+
+ atomic_inc(&cio->refcount);
+ submit_bio(dst_bio);
+ blk_finish_plug(&plug);
+ pos_in += chunk;
+ pos_out += chunk;
+ }
+
+ if (atomic_dec_and_test(&cio->refcount))
+ blkdev_copy_endio(cio);
+ if (endio)
+ return -EIOCBQUEUED;
+
+ return blkdev_copy_wait_for_completion_io(cio);
+
+err_free_src_bio:
+ bio_put(src_bio);
+err_free_offload_io:
+ kfree(offload_io);
+err_free_cio:
+ cio->copied = min_t(ssize_t, cio->copied, (len - rem));
+ cio->status = -ENOMEM;
+ if (rem == len) {
+ ret = cio->status;
+ kfree(cio);
+ return ret;
+ }
+ if (cio->endio)
+ return cio->status;
+
+ return blkdev_copy_wait_for_completion_io(cio);
+}
+EXPORT_SYMBOL_GPL(blkdev_copy_offload);
+
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop, unsigned flags)
@@ -1042,6 +1042,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp);
+ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
+ loff_t pos_out, size_t len,
+ void (*endio)(void *, int, ssize_t),
+ void *private, gfp_t gfp_mask);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
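For completeness, a minimal sketch of an asynchronous caller under the same
assumptions; my_copy_done, the completion and the pos/len variables are
illustrative and not part of the patch:

	/* Hypothetical callback, invoked as endio(private, status, copied). */
	static void my_copy_done(void *private, int status, ssize_t copied)
	{
		struct completion *done = private;

		pr_info("copy offload done: status=%d copied=%zd\n", status, copied);
		complete(done);
	}

	/* With a non-NULL endio the call returns -EIOCBQUEUED immediately. */
	DECLARE_COMPLETION_ONSTACK(done);
	ssize_t ret = blkdev_copy_offload(bdev, pos_in, pos_out, len,
					  my_copy_done, &done, GFP_KERNEL);
	if (ret == -EIOCBQUEUED)
		wait_for_completion(&done);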