author    Linus Torvalds <torvalds@linux-foundation.org>  2022-08-02 13:46:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-08-02 13:46:35 -0700
commit    c013d0af81f60cc7dbe357c4e2a925fb6738dbfe
tree      171dfdf928d0450a3fa98a58b2297d857804bb35 /drivers/nvme/target
parent    42df1cbf6a4726934cc5dac12bf263aa73c49fa3
parent    8d9fdb6011b4d413271eba3a62e10f89efecc419
Merge tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Improve the type checking of request flags (Bart)

 - Ensure queue mapping for a single queue always picks the right queue
   (Bart)

 - Sanitize the io priority handling (Jan)

 - rq-qos race fix (Jinke)

 - Reserved tags handling improvements (John)

 - Separate memory alignment from file/disk offset alignment for
   O_DIRECT (Keith)

 - Add new ublk driver, userspace block driver using io_uring for
   communication with the userspace backend (Ming)

 - Use try_cmpxchg() to cleanup the code in various spots (Uros)

 - Finally remove bdevname() (Christoph)

 - Clean up the zoned device handling (Christoph)

 - Clean up independent access range support (Christoph)

 - Clean up and improve block sysfs handling (Christoph)

 - Clean up and improve teardown of block devices. This turns the usual
   two step process into something that is simpler to implement and
   handle in block drivers (Christoph)

 - Clean up chunk size handling (Christoph)

 - Misc cleanups and fixes (Bart, Bo, Dan, GuoYong, Jason, Keith, Liu,
   Ming, Sebastian, Yang, Ying)

* tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block: (178 commits)
  ublk_drv: fix double shift bug
  ublk_drv: make sure that correct flags(features) returned to userspace
  ublk_drv: fix error handling of ublk_add_dev
  ublk_drv: fix lockdep warning
  block: remove __blk_get_queue
  block: call blk_mq_exit_queue from disk_release for never added disks
  blk-mq: fix error handling in __blk_mq_alloc_disk
  ublk: defer disk allocation
  ublk: rewrite ublk_ctrl_get_queue_affinity to not rely on hctx->cpumask
  ublk: fold __ublk_create_dev into ublk_ctrl_add_dev
  ublk: cleanup ublk_ctrl_uring_cmd
  ublk: simplify ublk_ch_open and ublk_ch_release
  ublk: remove the empty open and release block device operations
  ublk: remove UBLK_IO_F_PREFLUSH
  ublk: add a MAINTAINERS entry
  block: don't allow the same type rq_qos add more than once
  mmc: fix disk/queue leak in case of adding disk failure
  ublk_drv: fix an IS_ERR() vs NULL check
  ublk: remove UBLK_IO_F_INTEGRITY
  ublk_drv: remove unneeded semicolon
  ...
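The first item above, stronger type checking of request flags, drives most of the nvme target churn below: the combined operation-plus-flags value moves from a plain int to the dedicated blk_opf_t type, while enum req_op now names the operation alone. A minimal sketch of the distinction, assuming only the upstream <linux/blk_types.h> and <linux/bio.h> definitions (the classify() helper is hypothetical):

/* Sketch: blk_opf_t carries REQ_OP_* plus REQ_* flags; enum req_op
 * carries the operation only; bio_op() masks the flags back off. */
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <linux/printk.h>

static void classify(struct bio *bio)
{
	blk_opf_t opf = bio->bi_opf;	/* operation + REQ_* flags */
	enum req_op op = bio_op(bio);	/* REQ_OP_* only */

	if (op == REQ_OP_WRITE && (opf & REQ_FUA))
		pr_debug("FUA write\n");
}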
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 17
-rw-r--r--  drivers/nvme/target/loop.c        | 12
-rw-r--r--  drivers/nvme/target/zns.c         | 24
3 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 27a72504d31c..2dc1c1035626 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -246,7 +246,8 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i, rc;
+ blk_opf_t opf;
+ int i, rc;
struct sg_mapping_iter prot_miter;
unsigned int iter_flags;
unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
@@ -260,26 +261,26 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op |= REQ_FUA;
+ opf |= REQ_FUA;
iter_flags = SG_MITER_TO_SG;
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op |= REQ_NOMERGE;
+ opf |= REQ_NOMERGE;
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
GFP_KERNEL);
}
bio->bi_iter.bi_sector = sector;
@@ -306,7 +307,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
- op, GFP_KERNEL);
+ opf, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
bio_chain(bio, prev);
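For reference, both call sites in this function now take the combined blk_opf_t: bio_init() in its fifth argument, bio_alloc() in its third. A minimal sketch of the post-conversion call shape, with get_rw_bio() as a hypothetical helper rather than anything from the patch:

/* Hypothetical helper: shows the bio_init()/bio_alloc() signatures
 * after the blk_opf_t conversion; not part of the patch itself. */
#include <linux/bio.h>
#include <linux/gfp.h>

static struct bio *get_rw_bio(struct block_device *bdev, blk_opf_t opf,
			      struct bio *inline_bio, struct bio_vec *bvecs,
			      unsigned short nr_vecs, bool use_inline)
{
	if (use_inline) {
		bio_init(inline_bio, bdev, bvecs, nr_vecs, opf);
		return inline_bio;
	}
	return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
}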
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..0f5c77e22a0a 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
}
@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
mutex_unlock(&nvme_loop_ctrl_mutex);
if (nctrl->tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
blk_mq_free_tag_set(&ctrl->tag_set);
}
kfree(ctrl->queues);
@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
out_cleanup_queue:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
return 0;
out_cleanup_connect_q:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
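These hunks reflect the teardown rework called out in the merge message: blk_cleanup_queue() is gone, and queues created through a blk-mq tag set are now torn down with blk_mq_destroy_queue() before the tag set itself is freed. A sketch of the resulting order, assuming a hypothetical driver-private my_ctrl:

/* Sketch of the new teardown order; my_ctrl is hypothetical. */
#include <linux/blk-mq.h>

static void my_ctrl_teardown(struct my_ctrl *ctrl)
{
	blk_mq_destroy_queue(ctrl->admin_q);	/* was blk_cleanup_queue() */
	blk_mq_free_tag_set(&ctrl->admin_tag_set);	/* only after all queues are gone */
}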
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 82b61acf7a72..c7ef69f29fe4 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -57,10 +57,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
* zones, reject the device. Otherwise, use report zones to detect if
* the device has conventional zones.
*/
- if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
+ if (ns->bdev->bd_disk->conv_zones_bitmap)
return false;
- ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
+ ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
return false;
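bdev_nr_zones() replaces blkdev_nr_zones() and takes the struct block_device directly, so callers no longer have to reach through bd_disk. A sketch of the same report-zones walk, with count_conv_cb() as a hypothetical stand-in for validate_conv_zones_cb():

/* Sketch: count conventional zones with the new bdev_nr_zones();
 * count_conv_cb() is hypothetical, standing in for validate_conv_zones_cb(). */
#include <linux/blkdev.h>
#include <linux/blkzoned.h>

static int count_conv_cb(struct blk_zone *z, unsigned int idx, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		(*(unsigned int *)data)++;
	return 0;
}

static int count_conv_zones(struct block_device *bdev, unsigned int *nr_conv)
{
	*nr_conv = 0;
	return blkdev_report_zones(bdev, 0, bdev_nr_zones(bdev),
				   count_conv_cb, nr_conv);
}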
@@ -241,7 +241,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
- return blkdev_nr_zones(req->ns->bdev->bd_disk) -
+ return bdev_nr_zones(req->ns->bdev) -
(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}
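The arithmetic here is unchanged, only the helper is: with power-of-two zone sizes, shifting a start sector right by ilog2(bdev_zone_sectors()) yields its zone number. For example, with 524288-sector (256 MiB) zones the shift is 19, so an slba at sector 1048576 falls in zone 2, and a device with 100 zones reports 98 zones from that slba onward.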
@@ -308,7 +308,7 @@ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
queue_work(zbd_wq, &req->z.zmgmt_work);
}
-static inline enum req_opf zsa_req_op(u8 zsa)
+static inline enum req_op zsa_req_op(u8 zsa)
{
switch (zsa) {
case NVME_ZONE_OPEN:
@@ -386,7 +386,7 @@ static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
- unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
+ unsigned int nr_zones = bdev_nr_zones(bdev);
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
@@ -413,8 +413,8 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
ret = 0;
}
- while (sector < get_capacity(bdev->bd_disk)) {
- if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
+ while (sector < bdev_nr_sectors(bdev)) {
+ if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
bio = blk_next_bio(bio, bdev, 0,
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
@@ -422,7 +422,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
/* This may take a while, so be nice to others */
cond_resched();
}
- sector += blk_queue_zone_sectors(q);
+ sector += bdev_zone_sectors(bdev);
}
if (bio) {
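The loop now goes through bdev- and disk-based helpers end to end: bdev_nr_sectors() for the capacity bound, disk_zone_no() for the bitmap index, and bdev_zone_sectors() for the stride. A minimal sketch of the same walk, with visit() as a hypothetical per-zone callback:

/* Sketch: walk zone start sectors with the helpers this hunk adopts;
 * visit() is a hypothetical callback, not part of the patch. */
#include <linux/blkdev.h>
#include <linux/sched.h>

static void for_each_zone_start(struct block_device *bdev,
				void (*visit)(unsigned int zno, sector_t sect))
{
	sector_t sector = 0;

	while (sector < bdev_nr_sectors(bdev)) {
		visit(disk_zone_no(bdev->bd_disk, sector), sector);
		sector += bdev_zone_sectors(bdev);
		cond_resched();	/* long walk on large devices, be nice */
	}
}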
@@ -465,7 +465,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
- enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
+ enum req_op op = zsa_req_op(req->cmd->zms.zsa);
struct block_device *bdev = req->ns->bdev;
sector_t zone_sectors = bdev_zone_sectors(bdev);
u16 status = NVME_SC_SUCCESS;
@@ -525,7 +525,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
u16 status = NVME_SC_SUCCESS;
unsigned int total_len = 0;
struct scatterlist *sg;
@@ -556,9 +556,9 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
if (nvmet_use_inline_bvec(req)) {
bio = &req->z.inline_bio;
bio_init(bio, req->ns->bdev, req->inline_bvec,
- ARRAY_SIZE(req->inline_bvec), op);
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
}
bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
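Zone append gets the same treatment as the read/write path: REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE now lives in a blk_opf_t handed to bio_init()/bio_alloc(), and on completion the device-assigned write location comes back in bio->bi_iter.bi_sector. A minimal sketch, with my_append_done() as a hypothetical end_io callback:

/* Sketch: zone-append bio with blk_opf_t-typed flags; the sector the
 * device actually wrote to is in bi_iter.bi_sector at completion.
 * my_append_done() is hypothetical. */
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static void my_append_done(struct bio *bio)
{
	pr_debug("appended at sector %llu\n",
		 (unsigned long long)bio->bi_iter.bi_sector);
	bio_put(bio);
}

static struct bio *start_zone_append(struct block_device *bdev, sector_t zone_start)
{
	const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	struct bio *bio = bio_alloc(bdev, 1, opf, GFP_KERNEL);

	bio->bi_iter.bi_sector = zone_start;	/* target zone's start sector */
	bio->bi_end_io = my_append_done;
	return bio;
}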