author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-21 10:57:33 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-21 10:57:33 -0800
commit | 772c8f6f3bbd3ceb94a89373473083e3e1113554 (patch)
tree | d2b34e8f1841a169d59adf53074de217a9e0f977 /drivers
parent | fd4a61e08aa79f2b7835b25c6f94f27bd2d65990 (diff)
parent | 818551e2b2c662a1b26de6b4f7d6b8411a838d18 (diff)
Merge tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
- blk-mq scheduling framework from me and Omar, with a port of the
deadline scheduler for this framework. A port of BFQ from Paolo is in
the works, and should be ready for 4.12.
- Various fixups and improvements to the above scheduling framework
from Omar, Paolo, Bart, me, and others.
- Cleanup of the exported sysfs blk-mq data into debugfs, from Omar.
This allows us to export more information that helps debug hangs or
performance issues, without cluttering or abusing the sysfs API.
- Fixes for the sbitmap code, the scalable bitmap code that was
migrated from blk-mq, from Omar (a short sbitmap usage sketch follows
this list).
- Removal of the BLOCK_PC support in struct request, and refactoring of
carrying SCSI payloads in the block layer. This cleans up the code
nicely, and enables us to kill the SCSI specific parts of struct
request, shrinking it down nicely. From Christoph mainly, with help
from Hannes. The new passthrough idiom is sketched after this list.
- Support for ranged discard requests and discard merging, also from
Christoph.
- Support for OPAL in the block layer, and for NVMe as well. Mainly
from Scott Bauer, with fixes/updates from various other folks.
- Error code fixup for gdrom from Christophe.
- cciss pci irq allocation cleanup from Christoph.
- Making the cdrom device operations read only, from Kees Cook.
- Fixes for duplicate bdi registrations and bdi/queue lifetime
problems from Jan and Dan.
- Set of fixes and updates for lightnvm, from Matias and Javier.
- A few fixes for nbd from Josef: using an idr to name devices, and a
workqueue deadlock fix on receive. Also marks Josef as the current
maintainer of nbd. The idr naming pattern is sketched after the
commit list below.
- Fix from Josef for queue settings being overwritten when the number
of hardware queues is updated for a blk-mq device.
- NVMe fix from Keith, ensuring that we don't repeatedly mark an IO
aborted if we didn't end up aborting it.
- SG gap merging fix from Ming Lei for block.
- Loop fix also from Ming, fixing a race and crash between setting loop
status and IO. The queue-freeze pattern it relies on is sketched
after the diffstat below.
- Two block race fixes from Tahsin, fixing request list iteration and
fixing a race between device registration and udev device add
notifications.
- Double free fix in cgroup writeback, from Tejun.
- Another double free fix in blkcg, from Hou Tao.
- Partition overflow fix for EFI from Alden Tondettar.
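Since the sbitmap fixes above touch the allocator many block drivers now share, here is a minimal usage sketch; the depth of 128, the flags, and the demo_tags() caller are illustrative assumptions, not taken from this merge.

```c
#include <linux/sbitmap.h>
#include <linux/smp.h>

static int demo_tags(void)
{
	struct sbitmap_queue sbq;
	int tag, ret;

	/* 128 tags; shift = -1 lets the library pick the per-word
	 * granularity, round_robin = false prefers cache-hot low tags. */
	ret = sbitmap_queue_init_node(&sbq, 128, -1, false,
				      GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;

	tag = __sbitmap_queue_get(&sbq);	/* returns -1 when exhausted */
	if (tag >= 0)
		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);
	return 0;
}
```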
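And a hedged sketch of the passthrough idiom the BLOCK_PC removal introduces, as seen in the cciss and virtio_blk hunks below; mydrv_softirq_done() and its residual argument are made up for illustration.

```c
#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static void mydrv_softirq_done(struct request *rq, unsigned int residual)
{
	/* Drivers no longer test rq->cmd_type == REQ_TYPE_BLOCK_PC; the
	 * cmd_type field is gone and passthrough is an op, not a type. */
	if (!blk_rq_is_passthrough(rq))
		return;		/* REQ_OP_READ/WRITE: no SCSI payload */

	/* For REQ_OP_SCSI_IN/OUT the CDB, sense and residual live in the
	 * struct scsi_request embedded at the start of the driver pdu. */
	scsi_req(rq)->resid_len = residual;
}
```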
* tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block: (156 commits)
nvme: Check for Security send/recv support before issuing commands.
block/sed-opal: allocate struct opal_dev dynamically
block/sed-opal: tone down not supported warnings
block: don't defer flushes on blk-mq + scheduling
blk-mq-sched: ask scheduler for work, if we failed dispatching leftovers
blk-mq: don't special case flush inserts for blk-mq-sched
blk-mq-sched: don't add flushes to the head of requeue queue
blk-mq: have blk_mq_dispatch_rq_list() return if we queued IO or not
block: do not allow updates through sysfs until registration completes
lightnvm: set default lun range when no luns are specified
lightnvm: fix off-by-one error on target initialization
Maintainers: Modify SED list from nvme to block
Move stack parameters for sed_ioctl to prevent oversized stack with CONFIG_KASAN
uapi: sed-opal fix IOW for activate lsp to use correct struct
cdrom: Make device operations read-only
elevator: fix loading wrong elevator type for blk-mq devices
cciss: switch to pci_irq_alloc_vectors
block/loop: fix race between I/O and set_status
blk-mq-sched: don't hold queue_lock when calling exit_icq
block: set make_request_fn manually in blk_mq_update_nr_hw_queues
...
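As mentioned in the nbd item above, devices are now named via an idr instead of a static array. A minimal sketch of that allocation pattern follows; my_index_idr and the caller-supplied device pointer are hypothetical, and nbd itself additionally serializes this with a mutex.

```c
#include <linux/idr.h>

static DEFINE_IDR(my_index_idr);

/* Return the index to format into the "nbd%d" name, or a negative errno. */
static int my_dev_get_index(void *dev, int requested)
{
	int id;

	if (requested >= 0) {
		/* Claim exactly [requested, requested + 1). */
		id = idr_alloc(&my_index_idr, dev, requested, requested + 1,
			       GFP_KERNEL);
		if (id == -ENOSPC)
			id = -EEXIST;	/* that index is already in use */
	} else {
		/* end == 0 means no upper bound: first free index wins. */
		id = idr_alloc(&my_index_idr, dev, 0, 0, GFP_KERNEL);
	}
	return id;
}
```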
Diffstat (limited to 'drivers')
121 files changed, 2513 insertions, 4208 deletions
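Before the driver-by-driver diff: the loop fix noted above drains I/O around state changes with the blk-mq freeze API. A minimal sketch of the pattern, with a hypothetical apply() callback standing in for loop's status update:

```c
#include <linux/blk-mq.h>

static void update_under_freeze(struct request_queue *q,
				void (*apply)(void *), void *state)
{
	/* Stop new requests and wait out everything in flight so the
	 * update cannot race against I/O (cf. loop_set_status below). */
	blk_mq_freeze_queue(q);
	apply(state);
	blk_mq_unfreeze_queue(q);
}
```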
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 1f863e757ee4..c771d4c341ea 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1265,13 +1265,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) */ static int atapi_drain_needed(struct request *rq) { - if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) + if (likely(!blk_rq_is_passthrough(rq))) return 0; if (!blk_rq_bytes(rq) || op_is_write(req_op(rq))) return 0; - return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; + return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC; } static int ata_scsi_dev_config(struct scsi_device *sdev, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 223ff2fcae7e..f744de7a0f9b 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -69,6 +69,7 @@ config AMIGA_Z2RAM config GDROM tristate "SEGA Dreamcast GD-ROM drive" depends on SH_DREAMCAST + select BLK_SCSI_REQUEST # only for the generic cdrom code help A standard SEGA Dreamcast comes with a modified CD ROM drive called a "GD-ROM" by SEGA to signify it is capable of reading special disks @@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA tristate "Compaq Smart Array 5xxx support" depends on PCI select CHECK_SIGNATURE + select BLK_SCSI_REQUEST help This is the driver for Compaq Smart Array 5xxx controllers. Everyone using these boards should say Y here. @@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX config CDROM_PKTCDVD tristate "Packet writing on CD/DVD media (DEPRECATED)" depends on !UML + select BLK_SCSI_REQUEST help Note: This driver is deprecated and will be removed from the kernel in the near future! @@ -501,6 +504,16 @@ config VIRTIO_BLK This is the virtual block driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. +config VIRTIO_BLK_SCSI + bool "SCSI passthrough request for the Virtio block driver" + depends on VIRTIO_BLK + select BLK_SCSI_REQUEST + ---help--- + Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on + virtio-blk devices. This is only supported for the legacy + virtio protocol and not enabled by default by any hypervisor. + Your probably want to virtio-scsi instead. 
+ config BLK_DEV_HD bool "Very old hard disk (MFM/RLL/IDE) driver" depends on HAVE_IDE diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index ec9d8610b25f..027b876370bc 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp) WARN_ON(d->gd); WARN_ON(d->flags & DEVFL_UP); blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); - q->backing_dev_info.name = "aoe"; - q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE; + q->backing_dev_info->name = "aoe"; + q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE; d->bufpool = mp; d->blkq = gd->queue = q; q->queuedata = d; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index e5c5b8eb14a9..27d613795653 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -52,6 +52,7 @@ #include <scsi/scsi.h> #include <scsi/sg.h> #include <scsi/scsi_ioctl.h> +#include <scsi/scsi_request.h> #include <linux/cdrom.h> #include <linux/scatterlist.h> #include <linux/kthread.h> @@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq) dev_dbg(&h->pdev->dev, "Done with %p\n", rq); /* set the residual count for pc requests */ - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) - rq->resid_len = c->err_info->ResidualCnt; + if (blk_rq_is_passthrough(rq)) + scsi_req(rq)->resid_len = c->err_info->ResidualCnt; blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); @@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol, static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, int drv_index) { - disk->queue = blk_init_queue(do_cciss_request, &h->lock); + disk->queue = blk_alloc_queue(GFP_KERNEL); if (!disk->queue) goto init_queue_failure; + + disk->queue->cmd_size = sizeof(struct scsi_request); + disk->queue->request_fn = do_cciss_request; + disk->queue->queue_lock = &h->lock; + if (blk_init_allocated_queue(disk->queue) < 0) + goto cleanup_queue; + sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); disk->major = h->major; disk->first_minor = drv_index << NWD_SHIFT; @@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h, driver_byte = DRIVER_OK; msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ - if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) + if (blk_rq_is_passthrough(cmd->rq)) host_byte = DID_PASSTHROUGH; else host_byte = DID_OK; @@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h, host_byte, driver_byte); if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { - if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) + if (!blk_rq_is_passthrough(cmd->rq)) dev_warn(&h->pdev->dev, "cmd %p " "has SCSI Status 0x%x\n", cmd, cmd->err_info->ScsiStatus); @@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h, sense_key = 0xf & cmd->err_info->SenseInfo[2]; /* no status or recovered error */ if (((sense_key == 0x0) || (sense_key == 0x1)) && - (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) + !blk_rq_is_passthrough(cmd->rq)) error_value = 0; if (check_for_unit_attention(h, cmd)) { - *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); + *retry_cmd = !blk_rq_is_passthrough(cmd->rq); return 0; } /* Not SG_IO or similar? 
*/ - if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { + if (!blk_rq_is_passthrough(cmd->rq)) { if (error_value != 0) dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" " sense key = 0x%x\n", cmd, sense_key); return error_value; } - /* SG_IO or similar, copy sense data back */ - if (cmd->rq->sense) { - if (cmd->rq->sense_len > cmd->err_info->SenseLen) - cmd->rq->sense_len = cmd->err_info->SenseLen; - memcpy(cmd->rq->sense, cmd->err_info->SenseInfo, - cmd->rq->sense_len); - } else - cmd->rq->sense_len = 0; - + scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen; return error_value; } @@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, rq->errors = evaluate_target_status(h, cmd, &retry_cmd); break; case CMD_DATA_UNDERRUN: - if (cmd->rq->cmd_type == REQ_TYPE_FS) { + if (!blk_rq_is_passthrough(cmd->rq)) { dev_warn(&h->pdev->dev, "cmd %p has" " completed with data underrun " "reported\n", cmd); - cmd->rq->resid_len = cmd->err_info->ResidualCnt; } break; case CMD_DATA_OVERRUN: - if (cmd->rq->cmd_type == REQ_TYPE_FS) + if (!blk_rq_is_passthrough(cmd->rq)) dev_warn(&h->pdev->dev, "cciss: cmd %p has" " completed with data overrun " "reported\n", cmd); @@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "reported invalid\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_PROTOCOL_ERR: @@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "protocol error\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_HARDWARE_ERR: @@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, " hardware error\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_CONNECTION_LOST: @@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "connection lost\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_ABORTED: @@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "aborted\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); break; case CMD_ABORT_FAILED: @@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "abort failed\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? 
DID_PASSTHROUGH : DID_ERROR); break; case CMD_UNSOLICITED_ABORT: @@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "%p retried too many times\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); break; case CMD_TIMEOUT: dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_UNABORTABLE: dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); break; default: @@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, cmd->err_info->CommandStatus); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_is_passthrough(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); } @@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q) c->Header.SGList = h->max_cmd_sgentries; set_performant_mode(h, c); - if (likely(creq->cmd_type == REQ_TYPE_FS)) { + switch (req_op(creq)) { + case REQ_OP_READ: + case REQ_OP_WRITE: if(h->cciss_read == CCISS_READ_10) { c->Request.CDB[1] = 0; c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ @@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q) c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; c->Request.CDB[14] = c->Request.CDB[15] = 0; } - } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { - c->Request.CDBLen = creq->cmd_len; - memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); - } else { + break; + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: + c->Request.CDBLen = scsi_req(creq)->cmd_len; + memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB); + scsi_req(creq)->sense = c->err_info->SenseInfo; + break; + default: dev_warn(&h->pdev->dev, "bad request type %d\n", - creq->cmd_type); + creq->cmd_flags); BUG(); } @@ -4074,41 +4079,27 @@ clean_up: static void cciss_interrupt_mode(ctlr_info_t *h) { -#ifdef CONFIG_PCI_MSI - int err; - struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1}, - {0, 2}, {0, 3} - }; + int ret; /* Some boards advertise MSI but don't really support it */ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) goto default_int_mode; - if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { - err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4); - if (!err) { - h->intr[0] = cciss_msix_entries[0].vector; - h->intr[1] = cciss_msix_entries[1].vector; - h->intr[2] = cciss_msix_entries[2].vector; - h->intr[3] = cciss_msix_entries[3].vector; - h->msix_vector = 1; - return; - } else { - dev_warn(&h->pdev->dev, - "MSI-X init failed %d\n", err); - } - } - if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { - if (!pci_enable_msi(h->pdev)) - h->msi_vector = 1; - else - dev_warn(&h->pdev->dev, "MSI init failed\n"); + ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX); + if (ret >= 0) { + h->intr[0] = pci_irq_vector(h->pdev, 0); + h->intr[1] = pci_irq_vector(h->pdev, 1); + h->intr[2] = pci_irq_vector(h->pdev, 2); + h->intr[3] = 
pci_irq_vector(h->pdev, 3); + return; } + + ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI); + default_int_mode: -#endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ - h->intr[h->intr_mode] = h->pdev->irq; + h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0); return; } @@ -4888,7 +4879,7 @@ static int cciss_request_irq(ctlr_info_t *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { - if (h->msix_vector || h->msi_vector) { + if (h->pdev->msi_enabled || h->pdev->msix_enabled) { if (!request_irq(h->intr[h->intr_mode], msixhandler, 0, h->devname, h)) return 0; @@ -4934,12 +4925,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) int ctlr = h->ctlr; free_irq(h->intr[h->intr_mode], h); -#ifdef CONFIG_PCI_MSI - if (h->msix_vector) - pci_disable_msix(h->pdev); - else if (h->msi_vector) - pci_disable_msi(h->pdev); -#endif /* CONFIG_PCI_MSI */ + pci_free_irq_vectors(h->pdev); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); cciss_free_scatterlists(h); cciss_free_cmd_pool(h); @@ -5295,12 +5281,7 @@ static void cciss_remove_one(struct pci_dev *pdev) cciss_shutdown(pdev); -#ifdef CONFIG_PCI_MSI - if (h->msix_vector) - pci_disable_msix(h->pdev); - else if (h->msi_vector) - pci_disable_msi(h->pdev); -#endif /* CONFIG_PCI_MSI */ + pci_free_irq_vectors(h->pdev); iounmap(h->transtable); iounmap(h->cfgtable); diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 7fda30e4a241..4affa94ca17b 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -90,8 +90,6 @@ struct ctlr_info # define SIMPLE_MODE_INT 2 # define MEMQ_MODE_INT 3 unsigned int intr[4]; - unsigned int msix_vector; - unsigned int msi_vector; int intr_mode; int cciss_max_sectors; BYTE cciss_read; @@ -333,7 +331,7 @@ static unsigned long SA5_performant_completed(ctlr_info_t *h) */ register_value = readl(h->vaddr + SA5_OUTDB_STATUS); /* msi auto clears the interrupt pending bit. */ - if (!(h->msi_vector || h->msix_vector)) { + if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) { writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); /* Do a read in order to flush the write to the controller * (as per spec.) @@ -393,7 +391,7 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h) if (!register_value) return false; - if (h->msi_vector || h->msix_vector) + if (h->pdev->msi_enabled || h->pdev->msix_enabled) return true; /* Read outbound doorbell to flush */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index c3ff60c30dde..615e5b5178a0 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) if (get_ldev(device)) { q = bdev_get_queue(device->ldev->backing_bdev); - r = bdi_congested(&q->backing_dev_info, bdi_bits); + r = bdi_congested(q->backing_dev_info, bdi_bits); put_ldev(device); if (r) reason = 'b'; @@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig /* we have no partitions. we contain only ourselves. 
*/ device->this_bdev->bd_contains = device->this_bdev; - q->backing_dev_info.congested_fn = drbd_congested; - q->backing_dev_info.congested_data = device; + q->backing_dev_info->congested_fn = drbd_congested; + q->backing_dev_info->congested_data = device; blk_queue_make_request(q, drbd_make_request); blk_queue_write_cache(q, true, true); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index f35db29cac76..908c704e20aa 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi if (b) { blk_queue_stack_limits(q, b); - if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { + if (q->backing_dev_info->ra_pages != + b->backing_dev_info->ra_pages) { drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", - q->backing_dev_info.ra_pages, - b->backing_dev_info.ra_pages); - q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; + q->backing_dev_info->ra_pages, + b->backing_dev_info->ra_pages); + q->backing_dev_info->ra_pages = + b->backing_dev_info->ra_pages; } } fixup_discard_if_not_supported(q); @@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s, s->dev_disk_flags = md->flags; q = bdev_get_queue(device->ldev->backing_bdev); s->dev_lower_blocked = - bdi_congested(&q->backing_dev_info, + bdi_congested(q->backing_dev_info, (1 << WB_async_congested) | (1 << WB_sync_congested)); put_ldev(device); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index be2b93fd2c11..8378142f7a55 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { /* reset device->congestion_reason */ - bdi_rw_congested(&device->rq_queue->backing_dev_info); + bdi_rw_congested(device->rq_queue->backing_dev_info); nc = rcu_dereference(first_peer_device(device)->connection->net_conf); wp = nc ? 
nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index b489ac2e9c44..652114ae1a8a 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -927,7 +927,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se switch (rbm) { case RB_CONGESTED_REMOTE: - bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; + bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; return bdi_read_congested(bdi); case RB_LEAST_PENDING: return atomic_read(&device->local_cnt) > diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index a391a3cfb3fe..45b4384f650c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q) return; if (WARN(atomic_read(&usage_count) == 0, - "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n", - current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, + "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n", + current_req, (long)blk_rq_pos(current_req), (unsigned long long) current_req->cmd_flags)) return; @@ -3119,7 +3119,7 @@ static int raw_cmd_copyin(int cmd, void __user *param, *rcmd = NULL; loop: - ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER); + ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL); if (!ptr) return -ENOMEM; *rcmd = ptr; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index a9b48ed7a3cd..6043648da1e8 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -626,30 +626,29 @@ repeat: req_data_dir(req) == READ ? "read" : "writ", cyl, head, sec, nsect, bio_data(req->bio)); #endif - if (req->cmd_type == REQ_TYPE_FS) { - switch (rq_data_dir(req)) { - case READ: - hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, - &read_intr); - if (reset) - goto repeat; - break; - case WRITE: - hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE, - &write_intr); - if (reset) - goto repeat; - if (wait_DRQ()) { - bad_rw_intr(); - goto repeat; - } - outsw(HD_DATA, bio_data(req->bio), 256); - break; - default: - printk("unknown hd-command\n"); - hd_end_request_cur(-EIO); - break; + + switch (req_op(req)) { + case REQ_OP_READ: + hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, + &read_intr); + if (reset) + goto repeat; + break; + case REQ_OP_WRITE: + hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE, + &write_intr); + if (reset) + goto repeat; + if (wait_DRQ()) { + bad_rw_intr(); + goto repeat; } + outsw(HD_DATA, bio_data(req->bio), 256); + break; + default: + printk("unknown hd-command\n"); + hd_end_request_cur(-EIO); + break; } } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f347285c67ec..304377182c1a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) return -EINVAL; + /* I/O need to be drained during transfer transition */ + blk_mq_freeze_queue(lo->lo_queue); + err = loop_release_xfer(lo); if (err) - return err; + goto exit; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; @@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) err = loop_init_xfer(lo, xfer, info); if (err) - return err; + goto exit; if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) - if (figure_loop_size(lo, info->lo_offset, 
info->lo_sizelimit)) - return -EFBIG; + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { + err = -EFBIG; + goto exit; + } loop_config_discard(lo); @@ -1156,7 +1161,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); - return 0; + exit: + blk_mq_unfreeze_queue(lo->lo_queue); + return err; } static int diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index e937fcf71769..286f276f586e 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q) break; } - if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { - mg_end_request_cur(host, -EIO); - continue; - } - - if (rq_data_dir(host->req) == READ) + switch (req_op(host->req)) { + case REQ_OP_READ: mg_read(host->req); - else + break; + case REQ_OP_WRITE: mg_write(host->req); + break; + default: + mg_end_request_cur(host, -EIO); + break; + } } } @@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req, unsigned int sect_num, unsigned int sect_cnt) { - if (rq_data_dir(req) == READ) { + switch (req_op(host->req)) { + case REQ_OP_READ: if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } - } else { + break; + case REQ_OP_WRITE: /* TODO : handler */ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) @@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req, mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); + break; + default: + mg_end_request_cur(host, -EIO); + break; } return MG_ERR_NONE; } @@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q) continue; } - if (unlikely(req->cmd_type != REQ_TYPE_FS)) { - mg_end_request_cur(host, -EIO); - continue; - } - if (!mg_issue_req(req, host, sect_num, sect_cnt)) return; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9fd06eeb1a17..0be84a3cb6d7 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -41,6 +41,9 @@ #include <linux/nbd.h> +static DEFINE_IDR(nbd_index_idr); +static DEFINE_MUTEX(nbd_index_mutex); + struct nbd_sock { struct socket *sock; struct mutex tx_lock; @@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir; #define NBD_MAGIC 0x68797548 static unsigned int nbds_max = 16; -static struct nbd_device *nbd_dev; static int max_part; +static struct workqueue_struct *recv_workqueue; +static int part_shift; static inline struct device *nbd_to_dev(struct nbd_device *nbd) { @@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); req->errors++; - /* - * If our disconnect packet times out then we're already holding the - * config_lock and could deadlock here, so just set an error and return, - * we'll handle shutting everything down later. 
- */ - if (req->cmd_type == REQ_TYPE_DRV_PRIV) - return BLK_EH_HANDLED; mutex_lock(&nbd->config_lock); sock_shutdown(nbd); mutex_unlock(&nbd->config_lock); @@ -278,14 +275,29 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) u32 type; u32 tag = blk_mq_unique_tag(req); - if (req_op(req) == REQ_OP_DISCARD) + switch (req_op(req)) { + case REQ_OP_DISCARD: type = NBD_CMD_TRIM; - else if (req_op(req) == REQ_OP_FLUSH) + break; + case REQ_OP_FLUSH: type = NBD_CMD_FLUSH; - else if (rq_data_dir(req) == WRITE) + break; + case REQ_OP_WRITE: type = NBD_CMD_WRITE; - else + break; + case REQ_OP_READ: type = NBD_CMD_READ; + break; + default: + return -EIO; + } + + if (rq_data_dir(req) == WRITE && + (nbd->flags & NBD_FLAG_READ_ONLY)) { + dev_err_ratelimited(disk_to_dev(nbd->disk), + "Write on read-only\n"); + return -EIO; + } memset(&request, 0, sizeof(request)); request.magic = htonl(NBD_REQUEST_MAGIC); @@ -510,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) goto error_out; } - if (req->cmd_type != REQ_TYPE_FS && - req->cmd_type != REQ_TYPE_DRV_PRIV) - goto error_out; - - if (req->cmd_type == REQ_TYPE_FS && - rq_data_dir(req) == WRITE && - (nbd->flags & NBD_FLAG_READ_ONLY)) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Write on read-only\n"); - goto error_out; - } - req->errors = 0; nsock = nbd->socks[index]; @@ -785,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, INIT_WORK(&args[i].work, recv_work); args[i].nbd = nbd; args[i].index = i; - queue_work(system_long_wq, &args[i].work); + queue_work(recv_workqueue, &args[i].work); } wait_event_interruptible(nbd->recv_wq, atomic_read(&nbd->recv_threads) == 0); @@ -996,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = { .timeout = nbd_xmit_timeout, }; +static void nbd_dev_remove(struct nbd_device *nbd) +{ + struct gendisk *disk = nbd->disk; + nbd->magic = 0; + if (disk) { + del_gendisk(disk); + blk_cleanup_queue(disk->queue); + blk_mq_free_tag_set(&nbd->tag_set); + put_disk(disk); + } + kfree(nbd); +} + +static int nbd_dev_add(int index) +{ + struct nbd_device *nbd; + struct gendisk *disk; + struct request_queue *q; + int err = -ENOMEM; + + nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); + if (!nbd) + goto out; + + disk = alloc_disk(1 << part_shift); + if (!disk) + goto out_free_nbd; + + if (index >= 0) { + err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, + GFP_KERNEL); + if (err == -ENOSPC) + err = -EEXIST; + } else { + err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); + if (err >= 0) + index = err; + } + if (err < 0) + goto out_free_disk; + + nbd->disk = disk; + nbd->tag_set.ops = &nbd_mq_ops; + nbd->tag_set.nr_hw_queues = 1; + nbd->tag_set.queue_depth = 128; + nbd->tag_set.numa_node = NUMA_NO_NODE; + nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); + nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | + BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; + nbd->tag_set.driver_data = nbd; + + err = blk_mq_alloc_tag_set(&nbd->tag_set); + if (err) + goto out_free_idr; + + q = blk_mq_init_queue(&nbd->tag_set); + if (IS_ERR(q)) { + err = PTR_ERR(q); + goto out_free_tags; + } + disk->queue = q; + + /* + * Tell the block layer that we are not a rotational device + */ + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); + disk->queue->limits.discard_granularity = 512; + blk_queue_max_discard_sectors(disk->queue, UINT_MAX); + disk->queue->limits.discard_zeroes_data = 0; + 
blk_queue_max_hw_sectors(disk->queue, 65536); + disk->queue->limits.max_sectors = 256; + + nbd->magic = NBD_MAGIC; + mutex_init(&nbd->config_lock); + disk->major = NBD_MAJOR; + disk->first_minor = index << part_shift; + disk->fops = &nbd_fops; + disk->private_data = nbd; + sprintf(disk->disk_name, "nbd%d", index); + init_waitqueue_head(&nbd->recv_wq); + nbd_reset(nbd); + add_disk(disk); + return index; + +out_free_tags: + blk_mq_free_tag_set(&nbd->tag_set); +out_free_idr: + idr_remove(&nbd_index_idr, index); +out_free_disk: + put_disk(disk); +out_free_nbd: + kfree(nbd); +out: + return err; +} + /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) @@ -1003,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = { static int __init nbd_init(void) { - int err = -ENOMEM; int i; - int part_shift; BUILD_BUG_ON(sizeof(struct nbd_request) != 28); @@ -1034,111 +1129,38 @@ static int __init nbd_init(void) if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; - - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); - if (!nbd_dev) + recv_workqueue = alloc_workqueue("knbd-recv", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!recv_workqueue) return -ENOMEM; - for (i = 0; i < nbds_max; i++) { - struct request_queue *q; - struct gendisk *disk = alloc_disk(1 << part_shift); - if (!disk) - goto out; - nbd_dev[i].disk = disk; - - nbd_dev[i].tag_set.ops = &nbd_mq_ops; - nbd_dev[i].tag_set.nr_hw_queues = 1; - nbd_dev[i].tag_set.queue_depth = 128; - nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE; - nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd); - nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE | - BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; - nbd_dev[i].tag_set.driver_data = &nbd_dev[i]; - - err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set); - if (err) { - put_disk(disk); - goto out; - } - - /* - * The new linux 2.5 block layer implementation requires - * every gendisk to have its very own request_queue struct. - * These structs are big so we dynamically allocate them. 
- */ - q = blk_mq_init_queue(&nbd_dev[i].tag_set); - if (IS_ERR(q)) { - blk_mq_free_tag_set(&nbd_dev[i].tag_set); - put_disk(disk); - goto out; - } - disk->queue = q; - - /* - * Tell the block layer that we are not a rotational device - */ - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); - disk->queue->limits.discard_granularity = 512; - blk_queue_max_discard_sectors(disk->queue, UINT_MAX); - disk->queue->limits.discard_zeroes_data = 0; - blk_queue_max_hw_sectors(disk->queue, 65536); - disk->queue->limits.max_sectors = 256; - } - - if (register_blkdev(NBD_MAJOR, "nbd")) { - err = -EIO; - goto out; - } - - printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); + if (register_blkdev(NBD_MAJOR, "nbd")) + return -EIO; nbd_dbg_init(); - for (i = 0; i < nbds_max; i++) { - struct gendisk *disk = nbd_dev[i].disk; - nbd_dev[i].magic = NBD_MAGIC; - mutex_init(&nbd_dev[i].config_lock); - disk->major = NBD_MAJOR; - disk->first_minor = i << part_shift; - disk->fops = &nbd_fops; - disk->private_data = &nbd_dev[i]; - sprintf(disk->disk_name, "nbd%d", i); - init_waitqueue_head(&nbd_dev[i].recv_wq); - nbd_reset(&nbd_dev[i]); - add_disk(disk); - } + mutex_lock(&nbd_index_mutex); + for (i = 0; i < nbds_max; i++) + nbd_dev_add(i); + mutex_unlock(&nbd_index_mutex); + return 0; +} +static int nbd_exit_cb(int id, void *ptr, void *data) +{ + struct nbd_device *nbd = ptr; + nbd_dev_remove(nbd); return 0; -out: - while (i--) { - blk_mq_free_tag_set(&nbd_dev[i].tag_set); - blk_cleanup_queue(nbd_dev[i].disk->queue); - put_disk(nbd_dev[i].disk); - } - kfree(nbd_dev); - return err; } static void __exit nbd_cleanup(void) { - int i; - nbd_dbg_close(); - for (i = 0; i < nbds_max; i++) { - struct gendisk *disk = nbd_dev[i].disk; - nbd_dev[i].magic = 0; - if (disk) { - del_gendisk(disk); - blk_cleanup_queue(disk->queue); - blk_mq_free_tag_set(&nbd_dev[i].tag_set); - put_disk(disk); - } - } + idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL); + idr_destroy(&nbd_index_idr); + destroy_workqueue(recv_workqueue); unregister_blkdev(NBD_MAJOR, "nbd"); - kfree(nbd_dev); - printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); } module_init(nbd_init); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index c0e14e54909b..6f2e565bccc5 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -420,7 +420,8 @@ static void null_lnvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - nvm_end_io(rqd, error); + rqd->error = error; + nvm_end_io(rqd); blk_put_request(rq); } @@ -431,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) struct request *rq; struct bio *bio = rqd->bio; - rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); + rq = blk_mq_alloc_request(q, + op_is_write(bio_op(bio)) ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); if (IS_ERR(rq)) return -ENOMEM; - rq->cmd_type = REQ_TYPE_DRV_PRIV; rq->__sector = bio->bi_iter.bi_sector; rq->ioprio = bio_prio(bio); @@ -460,7 +461,6 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) id->ver_id = 0x1; id->vmnt = 0; - id->cgrps = 1; id->cap = 0x2; id->dom = 0x1; @@ -479,7 +479,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) sector_div(size, bs); /* convert size to pages */ size >>= 8; /* concert size to pgs pr blk */ - grp = &id->groups[0]; + grp = &id->grp; grp->mtype = 0; grp->fmtype = 0; grp->num_ch = 1; diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 92900f5f0b47..8127b8201a01 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q) if (!rq) break; - /* filter out block requests we don't understand */ - if (rq->cmd_type != REQ_TYPE_FS) { - blk_end_request_all(rq, 0); - continue; - } - /* deduce our operation (read, write, flush) */ /* I wish the block layer simplified cmd_type/cmd_flags/cmd[] * into a clearly defined set of RPC commands: diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig index efefb5ac3004..3a15247942e4 100644 --- a/drivers/block/paride/Kconfig +++ b/drivers/block/paride/Kconfig @@ -25,6 +25,7 @@ config PARIDE_PD config PARIDE_PCD tristate "Parallel port ATAPI CD-ROMs" depends on PARIDE + select BLK_SCSI_REQUEST # only for the generic cdrom code ---help--- This option enables the high-level driver for ATAPI CD-ROM devices connected through a parallel port. If you chose to build PARIDE diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 5fd2d0e25567..10aed84244f5 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -273,7 +273,7 @@ static const struct block_device_operations pcd_bdops = { .check_events = pcd_block_check_events, }; -static struct cdrom_device_ops pcd_dops = { +static const struct cdrom_device_ops pcd_dops = { .open = pcd_open, .release = pcd_release, .drive_status = pcd_drive_status, diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c3ed2fc72daa..644ba0888bd4 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */ static int pd_block; /* address of next requested block */ static int pd_count; /* number of blocks still to do */ static int pd_run; /* sectors in current cluster */ -static int pd_cmd; /* current command READ/WRITE */ static char *pd_buf; /* buffer for request in progress */ static enum action do_pd_io_start(void) { - if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) { + switch (req_op(pd_req)) { + case REQ_OP_DRV_IN: phase = pd_special; return pd_special(); - } - - pd_cmd = rq_data_dir(pd_req); - if (pd_cmd == READ || pd_cmd == WRITE) { + case REQ_OP_READ: + case REQ_OP_WRITE: pd_block = blk_rq_pos(pd_req); pd_count = blk_rq_cur_sectors(pd_req); if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) @@ -458,7 +456,7 @@ static enum action do_pd_io_start(void) pd_run = blk_rq_sectors(pd_req); pd_buf = bio_data(pd_req->bio); pd_retries = 0; - if (pd_cmd == READ) + if (req_op(pd_req) == REQ_OP_READ) return do_pd_read_start(); else return do_pd_write_start(); @@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk, struct request *rq; int err = 0; - rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM); + rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 
__GFP_RECLAIM); if (IS_ERR(rq)) return PTR_ERR(rq); - rq->cmd_type = REQ_TYPE_DRV_PRIV; rq->special = func; err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 1b94c1ca5c5f..66d846ba85a9 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * int ret = 0; rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? - WRITE : READ, __GFP_RECLAIM); + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM); if (IS_ERR(rq)) return PTR_ERR(rq); - blk_rq_set_block_pc(rq); + scsi_req_init(rq); if (cgc->buflen) { ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, @@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * goto out; } - rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); - memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); + scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]); + memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE); rq->timeout = 60*HZ; if (cgc->quiet) @@ -1243,7 +1243,7 @@ try_next_bio: && pd->bio_queue_size <= pd->write_congestion_off); spin_unlock(&pd->lock); if (wakeup) { - clear_bdi_congested(&pd->disk->queue->backing_dev_info, + clear_bdi_congested(pd->disk->queue->backing_dev_info, BLK_RW_ASYNC); } @@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) spin_lock(&pd->lock); if (pd->write_congestion_on > 0 && pd->bio_queue_size >= pd->write_congestion_on) { - set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC); + set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC); do { spin_unlock(&pd->lock); congestion_wait(BLK_RW_ASYNC, HZ); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 76f33c84ce3d..a809e3e9feb8 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); while ((req = blk_fetch_request(q))) { - if (req_op(req) == REQ_OP_FLUSH) { + switch (req_op(req)) { + case REQ_OP_FLUSH: if (ps3disk_submit_flush_request(dev, req)) - break; - } else if (req->cmd_type == REQ_TYPE_FS) { + return; + break; + case REQ_OP_READ: + case REQ_OP_WRITE: if (ps3disk_submit_request_sg(dev, req)) - break; - } else { + return; + break; + default: blk_dump_rq_flags(req, DEVICE_NAME " bad request"); __blk_end_request_all(req, -EIO); - continue; } } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 436baa66f701..362cecc77130 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4099,19 +4099,21 @@ static void rbd_queue_workfn(struct work_struct *work) bool must_be_locked; int result; - if (rq->cmd_type != REQ_TYPE_FS) { - dout("%s: non-fs request type %d\n", __func__, - (int) rq->cmd_type); - result = -EIO; - goto err; - } - - if (req_op(rq) == REQ_OP_DISCARD) + switch (req_op(rq)) { + case REQ_OP_DISCARD: op_type = OBJ_OP_DISCARD; - else if (req_op(rq) == REQ_OP_WRITE) + break; + case REQ_OP_WRITE: op_type = OBJ_OP_WRITE; - else + break; + case REQ_OP_READ: op_type = OBJ_OP_READ; + break; + default: + dout("%s: non-fs request type %d\n", __func__, req_op(rq)); + result = -EIO; + goto err; + } /* Ignore/skip any zero-length requests */ @@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->limits.discard_zeroes_data = 1; if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) - q->backing_dev_info.capabilities |= 
BDI_CAP_STABLE_WRITES; + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; disk->queue = q; diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index abf805e332e2..27833e4dae2a 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev, static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, uint cmd_in, ulong arg) { - int rc = 0; + static const int sg_version_num = 30527; + int rc = 0, timeout; struct gendisk *disk = bdev->bd_disk; struct skd_device *skdev = disk->private_data; - void __user *p = (void *)arg; + int __user *p = (int __user *)arg; pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", skdev->name, __func__, __LINE__, @@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, switch (cmd_in) { case SG_SET_TIMEOUT: + rc = get_user(timeout, p); + if (!rc) + disk->queue->sg_timeout = clock_t_to_jiffies(timeout); + break; case SG_GET_TIMEOUT: + rc = jiffies_to_clock_t(disk->queue->sg_timeout); + break; case SG_GET_VERSION_NUM: - rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); + rc = put_user(sg_version_num, p); break; case SG_IO: - rc = skd_ioctl_sg_io(skdev, mode, p); + rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg); break; default: diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 0e93ad7b8511..c8e072caf56f 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host) if (!crq) return NULL; - rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); + rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL); if (IS_ERR(rq)) { spin_lock_irqsave(&host->lock, flags); carm_put_request(host, crq); @@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx) spin_unlock_irq(&host->lock); DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); - crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; crq->rq->special = crq; blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); @@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func) crq->msg_bucket = (u32) rc; DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); - crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; crq->rq->special = crq; blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 264c5eac12b0..024b473524c0 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -52,11 +52,13 @@ struct virtio_blk { }; struct virtblk_req { - struct request *req; - struct virtio_blk_outhdr out_hdr; +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; u8 status; - u8 sense[SCSI_SENSE_BUFFERSIZE]; struct scatterlist sg[]; }; @@ -72,28 +74,88 @@ static inline int virtblk_result(struct virtblk_req *vbr) } } -static int __virtblk_add_req(struct virtqueue *vq, - struct virtblk_req *vbr, - struct scatterlist *data_sg, - bool have_data) +/* + * If this is a packet command we need a couple of additional headers. Behind + * the normal outhdr we put a segment with the scsi command block, and before + * the normal inhdr we put the sense data and the inhdr with additional status + * information. 
+ */ +#ifdef CONFIG_VIRTIO_BLK_SCSI +static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr, + struct scatterlist *data_sg, bool have_data) { struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; unsigned int num_out = 0, num_in = 0; - __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT); sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); sgs[num_out++] = &hdr; + sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len); + sgs[num_out++] = &cmd; + + if (have_data) { + if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) + sgs[num_out++] = data_sg; + else + sgs[num_out + num_in++] = data_sg; + } + + sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); + sgs[num_out + num_in++] = &sense; + sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); + sgs[num_out + num_in++] = &inhdr; + sg_init_one(&status, &vbr->status, sizeof(vbr->status)); + sgs[num_out + num_in++] = &status; + + return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); +} + +static inline void virtblk_scsi_reques_done(struct request *req) +{ + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); + struct virtio_blk *vblk = req->q->queuedata; + struct scsi_request *sreq = &vbr->sreq; + + sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); + sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); + req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); +} + +static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long data) +{ + struct gendisk *disk = bdev->bd_disk; + struct virtio_blk *vblk = disk->private_data; /* - * If this is a packet command we need a couple of additional headers. - * Behind the normal outhdr we put a segment with the scsi command - * block, and before the normal inhdr we put the sense data and the - * inhdr with additional status information. + * Only allow the generic SCSI ioctls if the host can support it. 
*/ - if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { - sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); - sgs[num_out++] = &cmd; - } + if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) + return -ENOTTY; + + return scsi_cmd_blk_ioctl(bdev, mode, cmd, + (void __user *)data); +} +#else +static inline int virtblk_add_req_scsi(struct virtqueue *vq, + struct virtblk_req *vbr, struct scatterlist *data_sg, + bool have_data) +{ + return -EIO; +} +static inline void virtblk_scsi_reques_done(struct request *req) +{ +} +#define virtblk_ioctl NULL +#endif /* CONFIG_VIRTIO_BLK_SCSI */ + +static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, + struct scatterlist *data_sg, bool have_data) +{ + struct scatterlist hdr, status, *sgs[3]; + unsigned int num_out = 0, num_in = 0; + + sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); + sgs[num_out++] = &hdr; if (have_data) { if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) @@ -102,14 +164,6 @@ static int __virtblk_add_req(struct virtqueue *vq, sgs[num_out + num_in++] = data_sg; } - if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { - memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); - sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); - sgs[num_out + num_in++] = &sense; - sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); - sgs[num_out + num_in++] = &inhdr; - } - sg_init_one(&status, &vbr->status, sizeof(vbr->status)); sgs[num_out + num_in++] = &status; @@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq, static inline void virtblk_request_done(struct request *req) { struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); - struct virtio_blk *vblk = req->q->queuedata; int error = virtblk_result(vbr); - if (req->cmd_type == REQ_TYPE_BLOCK_PC) { - req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); - req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); - req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); - } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) { + switch (req_op(req)) { + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: + virtblk_scsi_reques_done(req); + break; + case REQ_OP_DRV_IN: req->errors = (error != 0); + break; } blk_mq_end_request(req, error); @@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req, vbr->req->errors); + struct request *req = blk_mq_rq_from_pdu(vbr); + + blk_mq_complete_request(req, req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) @@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, int qid = hctx->queue_num; int err; bool notify = false; + u32 type; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - vbr->req = req; - if (req_op(req) == REQ_OP_FLUSH) { - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); - vbr->out_hdr.sector = 0; - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); - } else { - switch (req->cmd_type) { - case REQ_TYPE_FS: - vbr->out_hdr.type = 0; - vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req)); - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); - break; - case REQ_TYPE_BLOCK_PC: - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD); - vbr->out_hdr.sector = 0; - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); - break; - case 
REQ_TYPE_DRV_PRIV: - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); - vbr->out_hdr.sector = 0; - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); - break; - default: - /* We don't put anything else in the queue. */ - BUG(); - } + switch (req_op(req)) { + case REQ_OP_READ: + case REQ_OP_WRITE: + type = 0; + break; + case REQ_OP_FLUSH: + type = VIRTIO_BLK_T_FLUSH; + break; + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: + type = VIRTIO_BLK_T_SCSI_CMD; + break; + case REQ_OP_DRV_IN: + type = VIRTIO_BLK_T_GET_ID; + break; + default: + WARN_ON_ONCE(1); + return BLK_MQ_RQ_QUEUE_ERROR; } + vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type); + vbr->out_hdr.sector = type ? + 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req)); + vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); + blk_mq_start_request(req); - num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); + num = blk_rq_map_sg(hctx->queue, req, vbr->sg); if (num) { - if (rq_data_dir(vbr->req) == WRITE) + if (rq_data_dir(req) == WRITE) vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT); else vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN); } spin_lock_irqsave(&vblk->vqs[qid].lock, flags); - err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); + if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT) + err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); + else + err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vqs[qid].vq); blk_mq_stop_hw_queue(hctx); @@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) struct request *req; int err; - req = blk_get_request(q, READ, GFP_KERNEL); + req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL); if (IS_ERR(req)) return PTR_ERR(req); - req->cmd_type = REQ_TYPE_DRV_PRIV; err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (err) @@ -257,22 +314,6 @@ out: return err; } -static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long data) -{ - struct gendisk *disk = bdev->bd_disk; - struct virtio_blk *vblk = disk->private_data; - - /* - * Only allow the generic SCSI ioctls if the host can support it. 
- */ - if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) - return -ENOTTY; - - return scsi_cmd_blk_ioctl(bdev, mode, cmd, - (void __user *)data); -} - /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { @@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq, struct virtio_blk *vblk = data; struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); +#ifdef CONFIG_VIRTIO_BLK_SCSI + vbr->sreq.sense = vbr->sense; +#endif sg_init_table(vbr->sg, vblk->sg_elems); return 0; } @@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = { static unsigned int features_legacy[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, - VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, + VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, +#ifdef CONFIG_VIRTIO_BLK_SCSI + VIRTIO_BLK_F_SCSI, +#endif VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, VIRTIO_BLK_F_MQ, } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 265f1a7072e9..5067a0a952cb 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo) static inline bool blkif_request_flush_invalid(struct request *req, struct blkfront_info *info) { - return ((req->cmd_type != REQ_TYPE_FS) || + return (blk_rq_is_passthrough(req) || ((req_op(req) == REQ_OP_FLUSH) && !info->feature_flush) || ((req->cmd_flags & REQ_FUA) && diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index c4328d9d9981..757dce2147e0 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q) struct request *req; while ((req = blk_peek_request(q)) != NULL) { - if (req->cmd_type == REQ_TYPE_FS) + if (!blk_rq_is_passthrough(req)) break; blk_start_request(req); __blk_end_request_all(req, -EIO); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e5ab7d9e8c45..3cd7856156b4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram) { revalidate_disk(zram->disk); /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */ - zram->disk->queue->backing_dev_info.capabilities |= + zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 59cca72647a6..87739649eac2 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -281,8 +281,8 @@ #include <linux/fcntl.h> #include <linux/blkdev.h> #include <linux/times.h> - #include <linux/uaccess.h> +#include <scsi/scsi_request.h> /* used to tell the module to turn on full debugging messages */ static bool debug; @@ -342,8 +342,8 @@ static void cdrom_sysctl_register(void); static LIST_HEAD(cdrom_list); -static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, - struct packet_command *cgc) +int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc) { if (cgc->sense) { cgc->sense->sense_key = 0x05; @@ -354,6 +354,7 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, cgc->stat = -EIO; return -EIO; } +EXPORT_SYMBOL(cdrom_dummy_generic_packet); static int cdrom_flush_cache(struct cdrom_device_info *cdi) { @@ -371,7 +372,7 @@ static int cdrom_flush_cache(struct cdrom_device_info 
*cdi) static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; int ret, buflen; @@ -586,7 +587,7 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) int register_cdrom(struct cdrom_device_info *cdi) { static char banner_printed; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; int *change_capability = (int *)&cdo->capability; /* hack */ cd_dbg(CD_OPEN, "entering register_cdrom\n"); @@ -610,7 +611,6 @@ int register_cdrom(struct cdrom_device_info *cdi) ENSURE(reset, CDC_RESET); ENSURE(generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; - cdo->n_minors = 0; cdi->options = CDO_USE_FFLAGS; if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY)) @@ -630,8 +630,7 @@ int register_cdrom(struct cdrom_device_info *cdi) else cdi->cdda_method = CDDA_OLD; - if (!cdo->generic_packet) - cdo->generic_packet = cdrom_dummy_generic_packet; + WARN_ON(!cdo->generic_packet); cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); mutex_lock(&cdrom_mutex); @@ -652,7 +651,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi) if (cdi->exit) cdi->exit(cdi); - cdi->ops->n_minors--; cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); } @@ -1036,7 +1034,7 @@ static int open_for_data(struct cdrom_device_info *cdi) { int ret; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; tracktype tracks; cd_dbg(CD_OPEN, "entering open_for_data\n"); /* Check if the driver can report drive status. If it can, we @@ -1198,8 +1196,8 @@ err: /* This code is similar to that in open_for_data. The routine is called whenever an audio play operation is requested. 
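Most of the cdrom.c churn above is one mechanical change: every struct cdrom_device_ops pointer gains a const qualifier, which is what later lets drivers such as gdrom declare their ops table const. A minimal userspace sketch of the pattern; the struct and callback here are stand-ins, not the kernel types:

#include <stdio.h>

/* Stand-in for an ops table; the real cdrom_device_ops has many
 * more members. */
struct demo_ops {
	int (*open)(int minor);
	unsigned int capability;
};

static int demo_open(int minor)
{
	printf("opened minor %d\n", minor);
	return 0;
}

/* const lets the linker place the table in read-only memory, so a
 * stray write through the pointer is a compile-time error rather
 * than a run-time patch of a shared method table. */
static const struct demo_ops demo = {
	.open		= demo_open,
	.capability	= 1u << 0,
};

int main(void)
{
	const struct demo_ops *cdo = &demo;

	/* cdo->capability = 0; would now fail to compile */
	return cdo->open(0);
}

The one writable access the diff keeps is the (int *)&cdo->capability cast in register_cdrom(), which the code itself still labels a hack.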
*/ -static int check_for_audio_disc(struct cdrom_device_info * cdi, - struct cdrom_device_ops * cdo) +static int check_for_audio_disc(struct cdrom_device_info *cdi, + const struct cdrom_device_ops *cdo) { int ret; tracktype tracks; @@ -1254,7 +1252,7 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi, void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; int opened_for_data; cd_dbg(CD_CLOSE, "entering cdrom_release\n"); @@ -1294,7 +1292,7 @@ static int cdrom_read_mech_status(struct cdrom_device_info *cdi, struct cdrom_changer_info *buf) { struct packet_command cgc; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; int length; /* @@ -1643,7 +1641,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) int ret; u_char buf[20]; struct packet_command cgc; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; rpc_state_t rpc_state; memset(buf, 0, sizeof(buf)); @@ -1791,7 +1789,7 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s, { unsigned char buf[21], *base; struct dvd_layer *layer; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; int ret, layer_num = s->physical.layer_num; if (layer_num >= DVD_LAYERS) @@ -1842,7 +1840,7 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s, { int ret; u_char buf[8]; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ); cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE; @@ -1866,7 +1864,7 @@ static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s, { int ret, size; u_char *buf; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; size = sizeof(s->disckey.value) + 4; @@ -1894,7 +1892,7 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s, { int ret, size = 4 + 188; u_char *buf; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; buf = kmalloc(size, GFP_KERNEL); if (!buf) @@ -1928,7 +1926,7 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, { int ret = 0, size; u_char *buf; - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; size = sizeof(s->manufact.value) + 4; @@ -1995,7 +1993,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi, struct packet_command *cgc, int page_code, int page_control) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); @@ -2010,7 +2008,7 @@ int cdrom_mode_sense(struct cdrom_device_info *cdi, int cdrom_mode_select(struct cdrom_device_info *cdi, struct packet_command *cgc) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); memset(cgc->buffer, 0, 2); @@ -2025,7 +2023,7 @@ int cdrom_mode_select(struct cdrom_device_info *cdi, static int cdrom_read_subchannel(struct cdrom_device_info *cdi, struct cdrom_subchnl *subchnl, int mcn) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; char buffer[32]; int ret; @@ -2073,7 +2071,7 @@ static int cdrom_read_cd(struct cdrom_device_info *cdi, struct packet_command *cgc, int lba, int blocksize, int 
nblocks) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; memset(&cgc->cmd, 0, sizeof(cgc->cmd)); cgc->cmd[0] = GPCMD_READ_10; @@ -2093,7 +2091,7 @@ static int cdrom_read_block(struct cdrom_device_info *cdi, struct packet_command *cgc, int lba, int nblocks, int format, int blksize) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; memset(&cgc->cmd, 0, sizeof(cgc->cmd)); cgc->cmd[0] = GPCMD_READ_CD; @@ -2172,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, { struct request_queue *q = cdi->disk->queue; struct request *rq; + struct scsi_request *req; struct bio *bio; unsigned int len; int nr, ret = 0; @@ -2190,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, len = nr * CD_FRAMESIZE_RAW; - rq = blk_get_request(q, READ, GFP_KERNEL); + rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); if (IS_ERR(rq)) { ret = PTR_ERR(rq); break; } - blk_rq_set_block_pc(rq); + req = scsi_req(rq); + scsi_req_init(rq); ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); if (ret) { @@ -2203,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, break; } - rq->cmd[0] = GPCMD_READ_CD; - rq->cmd[1] = 1 << 2; - rq->cmd[2] = (lba >> 24) & 0xff; - rq->cmd[3] = (lba >> 16) & 0xff; - rq->cmd[4] = (lba >> 8) & 0xff; - rq->cmd[5] = lba & 0xff; - rq->cmd[6] = (nr >> 16) & 0xff; - rq->cmd[7] = (nr >> 8) & 0xff; - rq->cmd[8] = nr & 0xff; - rq->cmd[9] = 0xf8; - - rq->cmd_len = 12; + req->cmd[0] = GPCMD_READ_CD; + req->cmd[1] = 1 << 2; + req->cmd[2] = (lba >> 24) & 0xff; + req->cmd[3] = (lba >> 16) & 0xff; + req->cmd[4] = (lba >> 8) & 0xff; + req->cmd[5] = lba & 0xff; + req->cmd[6] = (nr >> 16) & 0xff; + req->cmd[7] = (nr >> 8) & 0xff; + req->cmd[8] = nr & 0xff; + req->cmd[9] = 0xf8; + + req->cmd_len = 12; rq->timeout = 60 * HZ; bio = rq->bio; if (blk_execute_rq(q, cdi->disk, rq, 0)) { - struct request_sense *s = rq->sense; + struct request_sense *s = req->sense; ret = -EIO; cdi->last_sense = s->sense_key; } @@ -2764,7 +2764,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, */ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; struct modesel_head mh; @@ -2790,7 +2790,7 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size) static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type, track_information *ti) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; int ret, buflen; @@ -3049,7 +3049,7 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_msf msf; cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf))) @@ -3069,7 +3069,7 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, void __user *arg, struct packet_command *cgc) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_blk blk; cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk))) @@ -3164,7 
+3164,7 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, struct packet_command *cgc, int cmd) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); cgc->cmd[0] = GPCMD_START_STOP_UNIT; cgc->cmd[1] = 1; @@ -3177,7 +3177,7 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, struct packet_command *cgc, int cmd) { - struct cdrom_device_ops *cdo = cdi->ops; + const struct cdrom_device_ops *cdo = cdi->ops; cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); cgc->cmd[0] = GPCMD_PAUSE_RESUME; cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0; diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 584bc3126403..1372763a948f 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -481,7 +481,7 @@ static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, return -EINVAL; } -static struct cdrom_device_ops gdrom_ops = { +static const struct cdrom_device_ops gdrom_ops = { .open = gdrom_open, .release = gdrom_release, .drive_status = gdrom_drivestatus, @@ -489,9 +489,9 @@ static struct cdrom_device_ops gdrom_ops = { .get_last_session = gdrom_get_last_session, .reset = gdrom_hardreset, .audio_ioctl = gdrom_audio_ioctl, + .generic_packet = cdrom_dummy_generic_packet, .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, - .n_minors = 1, }; static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) @@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq) struct request *req; while ((req = blk_fetch_request(rq)) != NULL) { - if (req->cmd_type != REQ_TYPE_FS) { - printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); - __blk_end_request_all(req, -EIO); - continue; - } - if (rq_data_dir(req) != READ) { + switch (req_op(req)) { + case REQ_OP_READ: + /* + * Add to list of deferred work and then schedule + * workqueue. + */ + list_add_tail(&req->queuelist, &gdrom_deferred); + schedule_work(&work); + break; + case REQ_OP_WRITE: pr_notice("Read only device - write request ignored\n"); __blk_end_request_all(req, -EIO); - continue; + break; + default: + printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); + __blk_end_request_all(req, -EIO); + break; } - - /* - * Add to list of deferred work and then schedule - * workqueue. - */ - list_add_tail(&req->queuelist, &gdrom_deferred); - schedule_work(&work); } } @@ -807,16 +808,20 @@ static int probe_gdrom(struct platform_device *devptr) if (err) goto probe_fail_cmdirq_register; gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock); - if (!gd.gdrom_rq) + if (!gd.gdrom_rq) { + err = -ENOMEM; goto probe_fail_requestq; + } err = probe_gdrom_setupqueue(); if (err) goto probe_fail_toc; gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL); - if (!gd.toc) + if (!gd.toc) { + err = -ENOMEM; goto probe_fail_toc; + } add_disk(gd.disk); return 0; diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 39ea67f9b066..c99a25c075bc 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -10,6 +10,7 @@ menuconfig IDE tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)" depends on HAVE_IDE depends on BLOCK + select BLK_SCSI_REQUEST ---help--- If you say Y here, your kernel will be able to manage ATA/(E)IDE and ATAPI units. 
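The new "select BLK_SCSI_REQUEST" above ties into the series-wide layout change: the CDB, command length, residual count and sense pointer move out of struct request into a struct scsi_request placed at the start of each driver's per-request payload, which is what the virtio_blk init_request hunk earlier and the ide_init_rq hunk later set up. A rough userspace model of that layout; all names here are simplified stand-ins for the kernel definitions:

#include <assert.h>
#include <string.h>

struct scsi_request_model {
	unsigned char	cmd[16];	/* the CDB that left struct request */
	unsigned short	cmd_len;
	unsigned int	resid_len;
	unsigned char	*sense;
};

struct ide_request_model {
	struct scsi_request_model sreq;	/* must come first */
	unsigned char sense[96];
};

/* Model of a request carrying cmd_size bytes of driver payload */
struct request_model {
	int op;
	struct ide_request_model pdu;
};

/* scsi_req(rq) works because every user's payload begins with the
 * scsi_request part. */
static struct scsi_request_model *scsi_req_model(struct request_model *rq)
{
	return &rq->pdu.sreq;
}

/* what the new per-request init hooks do: point sreq.sense at the
 * per-request sense buffer once, at allocation time */
static void init_rq_model(struct request_model *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->pdu.sreq.sense = rq->pdu.sense;
}

int main(void)
{
	struct request_model rq;

	init_rq_model(&rq);
	scsi_req_model(&rq)->cmd[0] = 0x28;	/* READ(10) */
	scsi_req_model(&rq)->cmd_len = 10;
	assert(scsi_req_model(&rq)->sense == rq.pdu.sense);
	return 0;
}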
The most common cases are IDE hard drives and ATAPI diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index f90ea221f7f2..feb30061123b 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk, struct request *rq; int error; - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_MISC; rq->special = (char *)pc; if (buf && bufflen) { @@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk, goto put_req; } - memcpy(rq->cmd, pc->c, 12); + memcpy(scsi_req(rq)->cmd, pc->c, 12); if (drive->media == ide_tape) - rq->cmd[13] = REQ_IDETAPE_PC1; + scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1; error = blk_execute_rq(drive->queue, disk, rq, 0); put_req: blk_put_request(rq); @@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd); void ide_prep_sense(ide_drive_t *drive, struct request *rq) { struct request_sense *sense = &drive->sense_data; - struct request *sense_rq = &drive->sense_rq; + struct request *sense_rq = drive->sense_rq; + struct scsi_request *req = scsi_req(sense_rq); unsigned int cmd_len, sense_len; int err; @@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) BUG_ON(sense_len > sizeof(*sense)); - if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed) + if (ata_sense_request(rq) || drive->sense_rq_armed) return; memset(sense, 0, sizeof(*sense)); blk_rq_init(rq->q, sense_rq); + scsi_req_init(sense_rq); err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len, GFP_NOIO); @@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) } sense_rq->rq_disk = rq->rq_disk; - sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; - sense_rq->cmd[4] = cmd_len; - sense_rq->cmd_type = REQ_TYPE_ATA_SENSE; + sense_rq->cmd_flags = REQ_OP_DRV_IN; + ide_req(sense_rq)->type = ATA_PRIV_SENSE; sense_rq->rq_flags |= RQF_PREEMPT; + req->cmd[0] = GPCMD_REQUEST_SENSE; + req->cmd[4] = cmd_len; if (drive->media == ide_tape) - sense_rq->cmd[13] = REQ_IDETAPE_PC1; + req->cmd[13] = REQ_IDETAPE_PC1; drive->sense_rq_armed = true; } @@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special) return -ENOMEM; } - drive->sense_rq.special = special; + drive->sense_rq->special = special; drive->sense_rq_armed = false; drive->hwif->rq = NULL; - elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); + elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT); return 0; } EXPORT_SYMBOL_GPL(ide_queue_sense_rq); @@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq); void ide_retry_pc(ide_drive_t *drive) { struct request *failed_rq = drive->hwif->rq; - struct request *sense_rq = &drive->sense_rq; + struct request *sense_rq = drive->sense_rq; struct ide_atapi_pc *pc = &drive->request_sense_pc; (void)ide_read_error(drive); /* init pc from sense_rq */ ide_init_pc(pc); - memcpy(pc->c, sense_rq->cmd, 12); + memcpy(pc->c, scsi_req(sense_rq)->cmd, 12); if (drive->media == ide_tape) drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; @@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive) * commands/drives support that. Let ide_timer_expiry keep polling us * for these. 
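The ide_prep_sense() hunk above arms a pre-allocated sense request ahead of time so a REQUEST SENSE can be queued from completion context without allocating; the CDB it builds has only two non-zero bytes. A standalone sketch of that packing, with the constants redefined locally rather than taken from kernel headers:

#include <stdio.h>
#include <string.h>

#define GPCMD_REQUEST_SENSE	0x03	/* same opcode the kernel uses */
#define ATAPI_CDB_BYTES		12

static void prep_sense_cdb(unsigned char *cdb, unsigned char alloc_len)
{
	memset(cdb, 0, ATAPI_CDB_BYTES);
	cdb[0] = GPCMD_REQUEST_SENSE;
	cdb[4] = alloc_len;	/* CDB byte 4: allocation length */
}

int main(void)
{
	unsigned char cdb[ATAPI_CDB_BYTES];
	int i;

	prep_sense_cdb(cdb, 18);	/* 18 bytes of fixed-format sense */
	for (i = 0; i < ATAPI_CDB_BYTES; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}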
*/ - switch (rq->cmd[0]) { + switch (scsi_req(rq)->cmd[0]) { case GPCMD_BLANK: case GPCMD_FORMAT_UNIT: case GPCMD_RESERVE_RZONE_TRACK: @@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive) default: if (!(rq->rq_flags & RQF_QUIET)) printk(KERN_INFO PFX "cmd 0x%x timed out\n", - rq->cmd[0]); + scsi_req(rq)->cmd[0]); wait = 0; break; } @@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry); int ide_cd_get_xferlen(struct request *rq) { - switch (rq->cmd_type) { - case REQ_TYPE_FS: + switch (req_op(rq)) { + default: return 32768; - case REQ_TYPE_ATA_SENSE: - case REQ_TYPE_BLOCK_PC: - case REQ_TYPE_ATA_PC: + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: return blk_rq_bytes(rq); - default: - return 0; + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + switch (ide_req(rq)->type) { + case ATA_PRIV_PC: + case ATA_PRIV_SENSE: + return blk_rq_bytes(rq); + default: + return 0; + } } } EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); @@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len, drive->name, __func__, ireason); } - if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC) + if (dev_is_idecd(drive) && ata_pc_request(rq)) rq->rq_flags |= RQF_FAILED; return 1; @@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ? "write" : "read"); pc->flags |= PC_FLAG_DMA_ERROR; } else - rq->resid_len = 0; + scsi_req(rq)->resid_len = 0; debug_log("%s: DMA finished\n", drive->name); } @@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) local_irq_enable_in_hardirq(); if (drive->media == ide_tape && - (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) + (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE) stat &= ~ATA_ERR; if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) { @@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) if (drive->media != ide_tape) pc->rq->errors++; - if (rq->cmd[0] == REQUEST_SENSE) { + if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) { printk(KERN_ERR PFX "%s: I/O error in request " "sense command\n", drive->name); return ide_do_reset(drive); @@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) if (uptodate == 0) drive->failed_pc = NULL; - if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { + if (ata_misc_request(rq)) { rq->errors = 0; error = 0; } else { - if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { + if (blk_rq_is_passthrough(rq) && uptodate <= 0) { if (rq->errors == 0) rq->errors = -EIO; } @@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ide_pio_bytes(drive, cmd, write, done); /* Update transferred byte count */ - rq->resid_len -= done; + scsi_req(rq)->resid_len -= done; bcount -= done; @@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ide_pad_transfer(drive, write, bcount); debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", - rq->cmd[0], done, bcount, rq->resid_len); + rq->cmd[0], done, bcount, scsi_req(rq)->resid_len); /* And set the interrupt handler again */ ide_set_handler(drive, ide_pc_intr, timeout); @@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) if (dev_is_idecd(drive)) { /* ATAPI commands get padded out to 12 bytes minimum */ - cmd_len = COMMAND_SIZE(rq->cmd[0]); + cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]); if (cmd_len < ATAPI_MIN_CDB_BYTES) cmd_len = ATAPI_MIN_CDB_BYTES; @@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) /* Send the actual packet */ if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) - 
hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); + hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len); /* Begin DMA, if necessary */ if (dev_is_idecd(drive)) { @@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) bytes, 63 * 1024)); /* We haven't transferred any data yet */ - rq->resid_len = bcount; + scsi_req(rq)->resid_len = bcount; if (pc->flags & PC_FLAG_DMA_ERROR) { pc->flags &= ~PC_FLAG_DMA_ERROR; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 9cbd217bc0c9..aef00511ca86 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) * don't log START_STOP unit with LoEj set, since we cannot * reliably check if drive can auto-close */ - if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) + if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) break; log = 1; break; @@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, * toc has not been recorded yet, it will fail with 05/24/00 (which is a * confusing error) */ - if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) + if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) if (sense->sense_key == 0x05 && sense->asc == 0x24) return; @@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, if (!sense->valid) break; if (failed_command == NULL || - failed_command->cmd_type != REQ_TYPE_FS) + blk_rq_is_passthrough(failed_command)) break; sector = (sense->information[0] << 24) | (sense->information[1] << 16) | @@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) { /* - * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original + * For ATA_PRIV_SENSE, "rq->special" points to the original * failed request. Also, the sense data should be read * directly from rq which might be different from the original * sense buffer if it got copied during mapping. @@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) void *sense = bio_data(rq->bio); if (failed) { - if (failed->sense) { - /* - * Sense is always read into drive->sense_data. - * Copy back if the failed request has its - * sense pointer set. - */ - memcpy(failed->sense, sense, 18); - failed->sense_len = rq->sense_len; - } + /* + * Sense is always read into drive->sense_data, copy back to the + * original request. + */ + memcpy(scsi_req(failed)->sense, sense, 18); + scsi_req(failed)->sense_len = scsi_req(rq)->sense_len; cdrom_analyze_sense_data(drive, failed); if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) @@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) "stat 0x%x", rq->cmd[0], rq->cmd_type, err, stat); - if (rq->cmd_type == REQ_TYPE_ATA_SENSE) { + if (ata_sense_request(rq)) { /* * We got an error trying to get sense info from the drive * (probably while trying to recover from a former error). 
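From here on, nearly every rq->cmd_type == REQ_TYPE_FS test in the IDE code becomes !blk_rq_is_passthrough(rq), and the old REQ_TYPE_BLOCK_PC/REQ_TYPE_ATA_* cases become request operations (REQ_OP_SCSI_IN/OUT, REQ_OP_DRV_IN/OUT) refined by ide_req(rq)->type. A userspace model of that classification; the enum values are illustrative, not the kernel's encoding inside cmd_flags:

#include <stdio.h>

enum demo_op {
	DEMO_OP_READ,		/* filesystem I/O */
	DEMO_OP_WRITE,
	DEMO_OP_FLUSH,
	DEMO_OP_SCSI_IN,	/* SG_IO-style passthrough, data in */
	DEMO_OP_SCSI_OUT,
	DEMO_OP_DRV_IN,		/* driver-private, e.g. the ATA_PRIV_* types */
	DEMO_OP_DRV_OUT,
};

static int demo_rq_is_passthrough(enum demo_op op)
{
	switch (op) {
	case DEMO_OP_SCSI_IN:
	case DEMO_OP_SCSI_OUT:
	case DEMO_OP_DRV_IN:
	case DEMO_OP_DRV_OUT:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("READ: %d, SCSI_IN: %d\n",
	       demo_rq_is_passthrough(DEMO_OP_READ),
	       demo_rq_is_passthrough(DEMO_OP_SCSI_IN));
	return 0;
}

The payoff is visible in the xen-blkfront and xsysace hunks earlier in this diff: one predicate replaces an open-coded cmd_type comparison, and a default: case naturally covers filesystem reads and writes.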
@@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) } /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ - if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) + if (blk_rq_is_scsi(rq) && !rq->errors) rq->errors = SAM_STAT_CHECK_CONDITION; if (blk_noretry_request(rq)) @@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) switch (sense_key) { case NOT_READY: - if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { + if (req_op(rq) == REQ_OP_WRITE) { if (ide_cd_breathe(drive, rq)) return 1; } else { cdrom_saw_media_change(drive); - if (rq->cmd_type == REQ_TYPE_FS && + if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_QUIET)) printk(KERN_ERR PFX "%s: tray open\n", drive->name); @@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) case UNIT_ATTENTION: cdrom_saw_media_change(drive); - if (rq->cmd_type != REQ_TYPE_FS) + if (blk_rq_is_passthrough(rq)) return 0; /* @@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) * * cdrom_log_sense() knows this! */ - if (rq->cmd[0] == GPCMD_START_STOP_UNIT) + if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT) break; /* fall-through */ case DATA_PROTECT: @@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) do_end_request = 1; break; default: - if (rq->cmd_type != REQ_TYPE_FS) + if (blk_rq_is_passthrough(rq)) break; if (err & ~ATA_ABORTED) { /* go to the default handler for other errors */ @@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) do_end_request = 1; } - if (rq->cmd_type != REQ_TYPE_FS) { + if (blk_rq_is_passthrough(rq)) { rq->rq_flags |= RQF_FAILED; do_end_request = 1; } @@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd) * Some of the trailing request sense fields are optional, * and some drives don't send them. Sigh. */ - if (rq->cmd[0] == GPCMD_REQUEST_SENSE && + if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE && cmd->nleft > 0 && cmd->nleft <= 5) cmd->nleft = 0; } @@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, req_flags_t rq_flags) { struct cdrom_info *info = drive->driver_data; - struct request_sense local_sense; int retries = 10; - req_flags_t flags = 0; - - if (!sense) - sense = &local_sense; + bool failed; ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " "rq_flags: 0x%x", @@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, do { struct request *rq; int error; + bool delay = false; - rq = blk_get_request(drive->queue, write, __GFP_RECLAIM); - - memcpy(rq->cmd, cmd, BLK_MAX_CDB); - rq->cmd_type = REQ_TYPE_ATA_PC; - rq->sense = sense; + rq = blk_get_request(drive->queue, + write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB); + ide_req(rq)->type = ATA_PRIV_PC; rq->rq_flags |= rq_flags; rq->timeout = timeout; if (buffer) { @@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, error = blk_execute_rq(drive->queue, info->disk, rq, 0); if (buffer) - *bufflen = rq->resid_len; - - flags = rq->rq_flags; - blk_put_request(rq); + *bufflen = scsi_req(rq)->resid_len; + if (sense) + memcpy(sense, scsi_req(rq)->sense, sizeof(*sense)); /* * FIXME: we should probably abort/retry or something in case of * failure. 
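The ide_cd_queue_pc() rework below reorders the retry loop: sense data is copied out while the request is still owned, blk_put_request() happens inside the loop, and the two-second wait for a loading drive moves after the put. A simplified model of the resulting control flow; issue_once() and its result struct are invented for the sketch, only the sense-key constants match the SCSI values:

#include <stdio.h>
#include <unistd.h>

#define NOT_READY	0x02	/* SCSI sense keys */
#define UNIT_ATTENTION	0x06

/* Invented stub: pretend the first attempt fails with a media change */
struct demo_result { int failed; int sense_key; int asc; };

static struct demo_result issue_once(int attempt)
{
	struct demo_result r = { attempt == 0, UNIT_ATTENTION, 0x28 };
	return r;
}

int main(void)
{
	int retries = 10, attempt = 0;
	struct demo_result r;

	do {
		int delay = 0;

		/* in the kernel: allocate, execute, copy out sense ... */
		r = issue_once(attempt++);
		if (r.failed) {
			if (r.sense_key == UNIT_ATTENTION) {
				/* media changed: just retry */
			} else if (r.sense_key == NOT_READY) {
				delay = 1;	/* drive still loading */
			} else {
				retries = 0;	/* don't retry */
			}
			--retries;
		}
		/* ... then put the request, and only sleep afterwards */
		if (delay)
			sleep(2);
	} while (r.failed && retries >= 0);

	return r.failed ? 1 : 0;
}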
*/ - if (flags & RQF_FAILED) { + failed = (rq->rq_flags & RQF_FAILED) != 0; + if (failed) { /* * The request failed. Retry if it was due to a unit * attention status (usually means media was changed). */ - struct request_sense *reqbuf = sense; + struct request_sense *reqbuf = scsi_req(rq)->sense; if (reqbuf->sense_key == UNIT_ATTENTION) cdrom_saw_media_change(drive); @@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, * a disk. Retry, but wait a little to give * the drive time to complete the load. */ - ssleep(2); + delay = true; } else { /* otherwise, don't retry */ retries = 0; } --retries; } - - /* end of retry loop */ - } while ((flags & RQF_FAILED) && retries >= 0); + blk_put_request(rq); + if (delay) + ssleep(2); + } while (failed && retries >= 0); /* return an error if the command failed */ - return (flags & RQF_FAILED) ? -EIO : 0; + return failed ? -EIO : 0; } /* @@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) ide_expiry_t *expiry = NULL; int dma_error = 0, dma, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; - int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE); + int sense = ata_sense_request(rq); unsigned int timeout; u16 len; u8 ireason, stat; @@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) ide_read_bcount_and_ireason(drive, &len, &ireason); - thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; + thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft; if (thislen > len) thislen = len; @@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* If DRQ is clear, the command has completed. */ if ((stat & ATA_DRQ) == 0) { - if (rq->cmd_type == REQ_TYPE_FS) { + switch (req_op(rq)) { + default: /* * If we're not done reading/writing, complain. * Otherwise, complete the command normally. @@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) rq->rq_flags |= RQF_FAILED; uptodate = 0; } - } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { + goto out_end; + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: ide_cd_request_sense_fixup(drive, cmd); uptodate = cmd->nleft ? 
0 : 1; @@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) if (!uptodate) rq->rq_flags |= RQF_FAILED; + goto out_end; + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: + goto out_end; } - goto out_end; } rc = ide_check_ireason(drive, rq, len, ireason, write); @@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) len -= blen; if (sense && write == 0) - rq->sense_len += blen; + scsi_req(rq)->sense_len += blen; } /* pad, if necessary */ if (len > 0) { - if (rq->cmd_type != REQ_TYPE_FS || write == 0) + if (blk_rq_is_passthrough(rq) || write == 0) ide_pad_transfer(drive, write, len); else { printk(KERN_ERR PFX "%s: confused, missing data\n", @@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) } } - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { + switch (req_op(rq)) { + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: timeout = rq->timeout; - } else { + break; + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + expiry = ide_cd_expiry; + /*FALLTHRU*/ + default: timeout = ATAPI_WAIT_PC; - if (rq->cmd_type != REQ_TYPE_FS) - expiry = ide_cd_expiry; + break; } hwif->expiry = expiry; @@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) return ide_started; out_end: - if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { - rq->resid_len = 0; + if (blk_rq_is_scsi(rq) && rc == 0) { + scsi_req(rq)->resid_len = 0; blk_end_request_all(rq, 0); hwif->rq = NULL; } else { if (sense && uptodate) ide_cd_complete_failed_rq(drive, rq); - if (rq->cmd_type == REQ_TYPE_FS) { + if (!blk_rq_is_passthrough(rq)) { if (cmd->nleft == 0) uptodate = 1; } else { @@ -684,10 +691,10 @@ out_end: return ide_stopped; /* make sure it's fully ended */ - if (rq->cmd_type != REQ_TYPE_FS) { - rq->resid_len -= cmd->nbytes - cmd->nleft; + if (blk_rq_is_passthrough(rq)) { + scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft; if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) - rq->resid_len += cmd->last_xfer_len; + scsi_req(rq)->resid_len += cmd->last_xfer_len; } ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); @@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", rq->cmd[0], rq->cmd_type); - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) + if (blk_rq_is_scsi(rq)) rq->rq_flags |= RQF_QUIET; else rq->rq_flags &= ~RQF_FAILED; @@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, if (drive->debug_mask & IDE_DBG_RQ) blk_dump_rq_flags(rq, "ide_cd_do_request"); - switch (rq->cmd_type) { - case REQ_TYPE_FS: + switch (req_op(rq)) { + default: if (cdrom_start_rw(drive, rq) == ide_stopped) goto out_end; break; - case REQ_TYPE_ATA_SENSE: - case REQ_TYPE_BLOCK_PC: - case REQ_TYPE_ATA_PC: + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: + handle_pc: if (!rq->timeout) rq->timeout = ATAPI_WAIT_PC; - cdrom_do_block_pc(drive, rq); break; - case REQ_TYPE_DRV_PRIV: - /* right now this can only be a reset... */ - uptodate = 1; - goto out_end; - default: - BUG(); + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + switch (ide_req(rq)->type) { + case ATA_PRIV_MISC: + /* right now this can only be a reset... 
*/ + uptodate = 1; + goto out_end; + case ATA_PRIV_SENSE: + case ATA_PRIV_PC: + goto handle_pc; + default: + BUG(); + } } /* prepare sense request for this command */ @@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, cmd.rq = rq; - if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { + if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } @@ -1166,7 +1179,7 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf) CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \ CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM) -static struct cdrom_device_ops ide_cdrom_dops = { +static const struct cdrom_device_ops ide_cdrom_dops = { .open = ide_cdrom_open_real, .release = ide_cdrom_release_real, .drive_status = ide_cdrom_drive_status, @@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) int hard_sect = queue_logical_block_size(q); long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); + struct scsi_request *req = scsi_req(rq); - memset(rq->cmd, 0, BLK_MAX_CDB); + memset(req->cmd, 0, BLK_MAX_CDB); if (rq_data_dir(rq) == READ) - rq->cmd[0] = GPCMD_READ_10; + req->cmd[0] = GPCMD_READ_10; else - rq->cmd[0] = GPCMD_WRITE_10; + req->cmd[0] = GPCMD_WRITE_10; /* * fill in lba */ - rq->cmd[2] = (block >> 24) & 0xff; - rq->cmd[3] = (block >> 16) & 0xff; - rq->cmd[4] = (block >> 8) & 0xff; - rq->cmd[5] = block & 0xff; + req->cmd[2] = (block >> 24) & 0xff; + req->cmd[3] = (block >> 16) & 0xff; + req->cmd[4] = (block >> 8) & 0xff; + req->cmd[5] = block & 0xff; /* * and transfer length */ - rq->cmd[7] = (blocks >> 8) & 0xff; - rq->cmd[8] = blocks & 0xff; - rq->cmd_len = 10; + req->cmd[7] = (blocks >> 8) & 0xff; + req->cmd[8] = blocks & 0xff; + req->cmd_len = 10; return BLKPREP_OK; } @@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) */ static int ide_cdrom_prep_pc(struct request *rq) { - u8 *c = rq->cmd; + u8 *c = scsi_req(rq)->cmd; /* transform 6-byte read/write commands to the 10-byte version */ if (c[0] == READ_6 || c[0] == WRITE_6) { @@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq) c[2] = 0; c[1] &= 0xe0; c[0] += (READ_10 - READ_6); - rq->cmd_len = 10; + scsi_req(rq)->cmd_len = 10; return BLKPREP_OK; } @@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq) static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) { - if (rq->cmd_type == REQ_TYPE_FS) + if (!blk_rq_is_passthrough(rq)) return ide_cdrom_prep_fs(q, rq); - else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) + else if (blk_rq_is_scsi(rq)) return ide_cdrom_prep_pc(rq); return 0; diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index f085e3a2e1d6..9fcefbc8425e 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c @@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi) struct request *rq; int ret; - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_MISC; rq->rq_flags = RQF_QUIET; ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); blk_put_request(rq); diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c index f079ca2f260b..58a6feb74c02 100644 --- a/drivers/ide/ide-cd_verbose.c +++ 
b/drivers/ide/ide-cd_verbose.c @@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command, while (hi > lo) { mid = (lo + hi) / 2; if (packet_command_texts[mid].packet_command == - failed_command->cmd[0]) { + scsi_req(failed_command)->cmd[0]) { s = packet_command_texts[mid].text; break; } if (packet_command_texts[mid].packet_command > - failed_command->cmd[0]) + scsi_req(failed_command)->cmd[0]) hi = mid; else lo = mid + 1; @@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command, printk(KERN_ERR " The failed \"%s\" packet command " "was: \n \"", s); for (i = 0; i < BLK_MAX_CDB; i++) - printk(KERN_CONT "%02x ", failed_command->cmd[i]); + printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]); printk(KERN_CONT "\"\n"); } diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c index 0dd43b4fcec6..a45dda5386e4 100644 --- a/drivers/ide/ide-devsets.c +++ b/drivers/ide/ide-devsets.c @@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, if (!(setting->flags & DS_SYNC)) return setting->set(drive, arg); - rq = blk_get_request(q, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_DRV_PRIV; - rq->cmd_len = 5; - rq->cmd[0] = REQ_DEVSET_EXEC; - *(int *)&rq->cmd[1] = arg; + rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_MISC; + scsi_req(rq)->cmd_len = 5; + scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC; + *(int *)&scsi_req(rq)->cmd[1] = arg; rq->special = setting->set; if (blk_execute_rq(q, NULL, rq, 0)) @@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq) { int err, (*setfunc)(ide_drive_t *, int) = rq->special; - err = setfunc(drive, *(int *)&rq->cmd[1]); + err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]); if (err) rq->errors = err; ide_complete_rq(drive, err, blk_rq_bytes(rq)); diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 5ceace542b77..186159715b71 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ide_hwif_t *hwif = drive->hwif; BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); - BUG_ON(rq->cmd_type != REQ_TYPE_FS); + BUG_ON(blk_rq_is_passthrough(rq)); ledtrig_disk_activity(); @@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd->tf_flags = IDE_TFLAG_DYN; cmd->protocol = ATA_PROT_NODATA; - - rq->cmd_type = REQ_TYPE_ATA_TASKFILE; + rq->cmd_flags &= ~REQ_OP_MASK; + rq->cmd_flags |= REQ_OP_DRV_OUT; + ide_req(rq)->type = ATA_PRIV_TASKFILE; rq->special = cmd; cmd->rq = rq; @@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg) if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) return -EBUSY; - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_ATA_TASKFILE; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_TASKFILE; drive->mult_req = arg; drive->special_flags |= IDE_SFLAG_SET_MULTMODE; diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index d6da011299f5..cf3af6840368 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c @@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) return ide_stopped; /* retry only "normal" I/O: */ - if (rq->cmd_type != REQ_TYPE_FS) { - if (rq->cmd_type == 
REQ_TYPE_ATA_TASKFILE) { + if (blk_rq_is_passthrough(rq)) { + if (ata_taskfile_request(rq)) { struct ide_cmd *cmd = rq->special; if (cmd) @@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) { struct request *rq = drive->hwif->rq; - if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV && - rq->cmd[0] == REQ_DRIVE_RESET) { + if (rq && ata_misc_request(rq) && + scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) { if (err <= 0 && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index f079d8d1d856..a69e8013f1df 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) drive->failed_pc = NULL; if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || - rq->cmd_type == REQ_TYPE_BLOCK_PC) + (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT)) uptodate = 1; /* FIXME */ else if (pc->c[0] == GPCMD_REQUEST_SENSE) { @@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) "Aborting request!\n"); } - if (rq->cmd_type == REQ_TYPE_DRV_PRIV) + if (ata_misc_request(rq)) rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; return uptodate; @@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive, put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); - memcpy(rq->cmd, pc->c, 12); + memcpy(scsi_req(rq)->cmd, pc->c, 12); pc->rq = rq; if (cmd == WRITE) @@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy, struct ide_atapi_pc *pc, struct request *rq) { ide_init_pc(pc); - memcpy(pc->c, rq->cmd, sizeof(pc->c)); + memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c)); pc->rq = rq; if (blk_rq_bytes(rq)) { pc->flags |= PC_FLAG_DMA_OK; @@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, } else printk(KERN_ERR PFX "%s: I/O error\n", drive->name); - if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { + if (ata_misc_request(rq)) { rq->errors = 0; ide_complete_rq(drive, 0, blk_rq_bytes(rq)); return ide_stopped; @@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, goto out_end; } - switch (rq->cmd_type) { - case REQ_TYPE_FS: + switch (req_op(rq)) { + default: if (((long)blk_rq_pos(rq) % floppy->bs_factor) || (blk_rq_sectors(rq) % floppy->bs_factor)) { printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", @@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, pc = &floppy->queued_pc; idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); break; - case REQ_TYPE_DRV_PRIV: - case REQ_TYPE_ATA_SENSE: - pc = (struct ide_atapi_pc *)rq->special; - break; - case REQ_TYPE_BLOCK_PC: + case REQ_OP_SCSI_IN: + case REQ_OP_SCSI_OUT: pc = &floppy->queued_pc; idefloppy_blockpc_cmd(floppy, pc, rq); break; - default: - BUG(); + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: + switch (ide_req(rq)->type) { + case ATA_PRIV_MISC: + case ATA_PRIV_SENSE: + pc = (struct ide_atapi_pc *)rq->special; + break; + default: + BUG(); + } } ide_prep_sense(drive, rq); @@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, cmd.rq = rq; - if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { + if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } @@ -296,7 +301,7 @@ static ide_startstop_t 
ide_floppy_do_request(ide_drive_t *drive, return ide_floppy_issue_pc(drive, &cmd, pc); out_end: drive->failed_pc = NULL; - if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) + if (blk_rq_is_passthrough(rq) && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); return ide_stopped; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 201e43fcbc94..043b1fb963cb 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) drive->dev_flags |= IDE_DFLAG_PARKED; } - if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { + if (rq && ata_taskfile_request(rq)) { struct ide_cmd *orig_cmd = rq->special; if (cmd->tf_flags & IDE_TFLAG_DYN) @@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq); void ide_kill_rq(ide_drive_t *drive, struct request *rq) { - u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk; + u8 drv_req = ata_misc_request(rq) && rq->rq_disk; u8 media = drive->media; drive->failed_pc = NULL; @@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq) } else { if (media == ide_tape) rq->errors = IDE_DRV_ERROR_GENERAL; - else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) + else if (blk_rq_is_passthrough(rq) && rq->errors == 0) rq->errors = -EIO; } @@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) { - u8 cmd = rq->cmd[0]; + u8 cmd = scsi_req(rq)->cmd[0]; switch (cmd) { case REQ_PARK_HEADS: @@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) if (drive->current_speed == 0xff) ide_config_drive_speed(drive, drive->desired_speed); - if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) + if (ata_taskfile_request(rq)) return execute_drive_cmd(drive, rq); else if (ata_pm_request(rq)) { struct ide_pm_state *pm = rq->special; @@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) pm->pm_step == IDE_PM_COMPLETED) ide_complete_pm_rq(drive, rq); return startstop; - } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV) + } else if (!rq->rq_disk && ata_misc_request(rq)) /* * TODO: Once all ULDs have been modified to * check for specific op codes rather than @@ -545,6 +545,7 @@ repeat: goto plug_device; } + scsi_req(rq)->resid_len = blk_rq_bytes(rq); hwif->rq = rq; spin_unlock_irq(&hwif->lock); diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index d05db2469209..248a3e0ceb46 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c @@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) if (NULL == (void *) arg) { struct request *rq; - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_ATA_TASKFILE; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_TASKFILE; err = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); @@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive) struct request *rq; int ret = 0; - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_DRV_PRIV; - rq->cmd_len = 1; - rq->cmd[0] = REQ_DRIVE_RESET; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_MISC; + scsi_req(rq)->cmd_len = 1; + scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET; if 
(blk_execute_rq(drive->queue, NULL, rq, 1)) ret = rq->errors; blk_put_request(rq); diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 2d7dca56dd24..101aed9a61ca 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c @@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) } spin_unlock_irq(&hwif->lock); - rq = blk_get_request(q, READ, __GFP_RECLAIM); - rq->cmd[0] = REQ_PARK_HEADS; - rq->cmd_len = 1; - rq->cmd_type = REQ_TYPE_DRV_PRIV; + rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + scsi_req(rq)->cmd[0] = REQ_PARK_HEADS; + scsi_req(rq)->cmd_len = 1; + ide_req(rq)->type = ATA_PRIV_MISC; rq->special = &timeout; rc = blk_execute_rq(q, NULL, rq, 1); blk_put_request(rq); @@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) * Make sure that *some* command is sent to the drive after the * timeout has expired, so power management will be reenabled. */ - rq = blk_get_request(q, READ, GFP_NOWAIT); + rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT); + scsi_req_init(rq); if (IS_ERR(rq)) goto out; - rq->cmd[0] = REQ_UNPARK_HEADS; - rq->cmd_len = 1; - rq->cmd_type = REQ_TYPE_DRV_PRIV; + scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; + scsi_req(rq)->cmd_len = 1; + ide_req(rq)->type = ATA_PRIV_MISC; elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); out: @@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq) struct ide_taskfile *tf = &cmd.tf; memset(&cmd, 0, sizeof(cmd)); - if (rq->cmd[0] == REQ_PARK_HEADS) { + if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) { drive->sleep = *(unsigned long *)rq->special; drive->dev_flags |= IDE_DFLAG_SLEEPING; tf->command = ATA_CMD_IDLEIMMEDIATE; diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index a015acdffb39..ec951be4b0c8 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c @@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) } memset(&rqpm, 0, sizeof(rqpm)); - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_PM_SUSPEND; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_SUSPEND; if (mesg.event == PM_EVENT_PRETHAW) @@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev) } memset(&rqpm, 0, sizeof(rqpm)); - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_PM_RESUME; rq->rq_flags |= RQF_PREEMPT; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_RESUME; @@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) #ifdef DEBUG_PM printk("%s: completing PM request, %s\n", drive->name, - (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume"); + (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? 
"suspend" : "resume"); #endif spin_lock_irqsave(q->queue_lock, flags); - if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) + if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) blk_stop_queue(q); else drive->dev_flags &= ~IDE_DFLAG_BLOCKED; @@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct ide_pm_state *pm = rq->special; - if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND && + if (blk_rq_is_private(rq) && + ide_req(rq)->type == ATA_PRIV_PM_SUSPEND && pm->pm_step == IDE_PM_START_SUSPEND) /* Mark drive blocked when starting the suspend sequence. */ drive->dev_flags |= IDE_DFLAG_BLOCKED; - else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME && + else if (blk_rq_is_private(rq) && + ide_req(rq)->type == ATA_PRIV_PM_RESUME && pm->pm_step == IDE_PM_START_RESUME) { /* * The first thing we do on wakeup is to wait for BSY bit to diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 330e319419e6..a74ae8df4bb8 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif) } } +static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) +{ + struct ide_request *req = blk_mq_rq_to_pdu(rq); + + req->sreq.sense = req->sense; + return 0; +} + /* * init request queue */ @@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive) * limits and LBA48 we could raise it but as yet * do not. */ - - q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif)); + q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif)); if (!q) return 1; + q->request_fn = do_ide_request; + q->init_rq_fn = ide_init_rq; + q->cmd_size = sizeof(struct ide_request); + if (blk_init_allocated_queue(q) < 0) { + blk_cleanup_queue(q); + return 1; + } + q->queuedata = drive; blk_queue_segment_boundary(q, 0xffff); @@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) ide_port_for_each_dev(i, drive, hwif) { u8 j = (hwif->index * MAX_DRIVES) + i; u16 *saved_id = drive->id; + struct request *saved_sense_rq = drive->sense_rq; memset(drive, 0, sizeof(*drive)); memset(saved_id, 0, SECTOR_SIZE); drive->id = saved_id; + drive->sense_rq = saved_sense_rq; drive->media = ide_disk; drive->select = (i << 4) | ATA_DEVICE_OBS; @@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif) int i; ide_port_for_each_dev(i, drive, hwif) { + kfree(drive->sense_rq); kfree(drive->id); kfree(drive); } @@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif) static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) { + ide_drive_t *drive; int i; for (i = 0; i < MAX_DRIVES; i++) { - ide_drive_t *drive; - drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node); if (drive == NULL) goto out_nomem; @@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) */ drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node); if (drive->id == NULL) - goto out_nomem; + goto out_free_drive; + + drive->sense_rq = kmalloc(sizeof(struct request) + + sizeof(struct ide_request), GFP_KERNEL); + if (!drive->sense_rq) + goto out_free_id; hwif->devices[i] = drive; } return 0; +out_free_id: + kfree(drive->id); +out_free_drive: + kfree(drive); out_nomem: ide_port_free_devices(hwif); return -ENOMEM; diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 9ecf4e35adcd..3c1b7974d66d 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive) /* correct remaining bytes to transfer 
*/ if (pc->flags & PC_FLAG_DMA_ERROR) - rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); + scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); /* * If error was the result of a zero-length read or write command, @@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive) pc->flags |= PC_FLAG_ABORT; } if (!(pc->flags & PC_FLAG_ABORT) && - (blk_rq_bytes(rq) - rq->resid_len)) + (blk_rq_bytes(rq) - scsi_req(rq)->resid_len)) pc->retries = IDETAPE_MAX_PC_RETRIES + 1; } } @@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) "itself - Aborting request!\n"); } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) { unsigned int blocks = - (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size; + (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size; tape->avg_size += blocks * tape->blk_size; @@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape, pc->flags |= PC_FLAG_WRITING; } - memcpy(rq->cmd, pc->c, 12); + memcpy(scsi_req(rq)->cmd, pc->c, 12); } static ide_startstop_t idetape_do_request(ide_drive_t *drive, @@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, idetape_tape_t *tape = drive->driver_data; struct ide_atapi_pc *pc = NULL; struct ide_cmd cmd; + struct scsi_request *req = scsi_req(rq); u8 stat; ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u", - rq->cmd[0], (unsigned long long)blk_rq_pos(rq), + req->cmd[0], (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq)); - BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV || - rq->cmd_type == REQ_TYPE_ATA_SENSE)); + BUG_ON(!blk_rq_is_private(rq)); + BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC && + ide_req(rq)->type != ATA_PRIV_SENSE); /* Retry a failed packet command */ if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { @@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, stat = hwif->tp_ops->read_status(hwif); if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && - (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) + (req->cmd[13] & REQ_IDETAPE_PC2) == 0) drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; if (drive->dev_flags & IDE_DFLAG_POST_RESET) { @@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, } else if (time_after(jiffies, tape->dsc_timeout)) { printk(KERN_ERR "ide-tape: %s: DSC timeout\n", tape->name); - if (rq->cmd[13] & REQ_IDETAPE_PC2) { + if (req->cmd[13] & REQ_IDETAPE_PC2) { idetape_media_access_finished(drive); return ide_stopped; } else { @@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, tape->postponed_rq = false; } - if (rq->cmd[13] & REQ_IDETAPE_READ) { + if (req->cmd[13] & REQ_IDETAPE_READ) { pc = &tape->queued_pc; ide_tape_create_rw_cmd(tape, pc, rq, READ_6); goto out; } - if (rq->cmd[13] & REQ_IDETAPE_WRITE) { + if (req->cmd[13] & REQ_IDETAPE_WRITE) { pc = &tape->queued_pc; ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6); goto out; } - if (rq->cmd[13] & REQ_IDETAPE_PC1) { + if (req->cmd[13] & REQ_IDETAPE_PC1) { pc = (struct ide_atapi_pc *)rq->special; - rq->cmd[13] &= ~(REQ_IDETAPE_PC1); - rq->cmd[13] |= REQ_IDETAPE_PC2; + req->cmd[13] &= ~(REQ_IDETAPE_PC1); + req->cmd[13] |= REQ_IDETAPE_PC2; goto out; } - if (rq->cmd[13] & REQ_IDETAPE_PC2) { + if (req->cmd[13] & REQ_IDETAPE_PC2) { idetape_media_access_finished(drive); return ide_stopped; } @@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size) BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE); BUG_ON(size 
< 0 || size % tape->blk_size); - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_DRV_PRIV; - rq->cmd[13] = cmd; + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_MISC; + scsi_req(rq)->cmd[13] = cmd; rq->rq_disk = tape->disk; rq->__sector = tape->first_frame; @@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size) blk_execute_rq(drive->queue, tape->disk, rq, 0); /* calculate the number of transferred bytes and update buffer state */ - size -= rq->resid_len; + size -= scsi_req(rq)->resid_len; tape->cur = tape->buf; if (cmd == REQ_IDETAPE_READ) tape->valid = size; diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index a716693417a3..247b9faccce1 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf, { struct request *rq; int error; - int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; - rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); - rq->cmd_type = REQ_TYPE_ATA_TASKFILE; + rq = blk_get_request(drive->queue, + (cmd->tf_flags & IDE_TFLAG_WRITE) ? + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + scsi_req_init(rq); + ide_req(rq)->type = ATA_PRIV_TASKFILE; /* * (ks) We transfer currently only whole sectors. diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index 247853ea1368..c3062b53056f 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c @@ -54,7 +54,7 @@ #define DRV_NAME "sis5513" /* registers layout and init values are chipset family dependent */ - +#undef ATA_16 #define ATA_16 0x01 #define ATA_33 0x02 #define ATA_66 0x03 diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 2f5d5f4a4c75..052714106b7b 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -26,15 +26,6 @@ config NVM_DEBUG It is required to create/remove targets without IOCTLs. -config NVM_GENNVM - tristate "General Non-Volatile Memory Manager for Open-Channel SSDs" - ---help--- - Non-volatile memory media manager for Open-Channel SSDs that implements - physical media metadata management and block provisioning API. - - This is the standard media manager for using Open-Channel SSDs, and - required for targets to be instantiated. - config NVM_RRPC tristate "Round-robin Hybrid Open-Channel SSD target" ---help--- diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile index a7a0a22cf1a5..b2a39e2d2895 100644 --- a/drivers/lightnvm/Makefile +++ b/drivers/lightnvm/Makefile @@ -2,6 +2,5 @@ # Makefile for Open-Channel SSDs. 
# -obj-$(CONFIG_NVM) := core.o sysblk.o -obj-$(CONFIG_NVM_GENNVM) += gennvm.o +obj-$(CONFIG_NVM) := core.o obj-$(CONFIG_NVM_RRPC) += rrpc.o diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 02240a0b39c9..5262ba66a7a7 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -29,10 +29,483 @@ static LIST_HEAD(nvm_tgt_types); static DECLARE_RWSEM(nvm_tgtt_lock); -static LIST_HEAD(nvm_mgrs); static LIST_HEAD(nvm_devices); static DECLARE_RWSEM(nvm_lock); +/* Map between virtual and physical channel and lun */ +struct nvm_ch_map { + int ch_off; + int nr_luns; + int *lun_offs; +}; + +struct nvm_dev_map { + struct nvm_ch_map *chnls; + int nr_chnls; +}; + +struct nvm_area { + struct list_head list; + sector_t begin; + sector_t end; /* end is excluded */ +}; + +static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name) +{ + struct nvm_target *tgt; + + list_for_each_entry(tgt, &dev->targets, list) + if (!strcmp(name, tgt->disk->disk_name)) + return tgt; + + return NULL; +} + +static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end) +{ + int i; + + for (i = lun_begin; i <= lun_end; i++) { + if (test_and_set_bit(i, dev->lun_map)) { + pr_err("nvm: lun %d already allocated\n", i); + goto err; + } + } + + return 0; +err: + while (--i > lun_begin) + clear_bit(i, dev->lun_map); + + return -EBUSY; +} + +static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin, + int lun_end) +{ + int i; + + for (i = lun_begin; i <= lun_end; i++) + WARN_ON(!test_and_clear_bit(i, dev->lun_map)); +} + +static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev) +{ + struct nvm_dev *dev = tgt_dev->parent; + struct nvm_dev_map *dev_map = tgt_dev->map; + int i, j; + + for (i = 0; i < dev_map->nr_chnls; i++) { + struct nvm_ch_map *ch_map = &dev_map->chnls[i]; + int *lun_offs = ch_map->lun_offs; + int ch = i + ch_map->ch_off; + + for (j = 0; j < ch_map->nr_luns; j++) { + int lun = j + lun_offs[j]; + int lunid = (ch * dev->geo.luns_per_chnl) + lun; + + WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); + } + + kfree(ch_map->lun_offs); + } + + kfree(dev_map->chnls); + kfree(dev_map); + + kfree(tgt_dev->luns); + kfree(tgt_dev); +} + +static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, + int lun_begin, int lun_end) +{ + struct nvm_tgt_dev *tgt_dev = NULL; + struct nvm_dev_map *dev_rmap = dev->rmap; + struct nvm_dev_map *dev_map; + struct ppa_addr *luns; + int nr_luns = lun_end - lun_begin + 1; + int luns_left = nr_luns; + int nr_chnls = nr_luns / dev->geo.luns_per_chnl; + int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl; + int bch = lun_begin / dev->geo.luns_per_chnl; + int blun = lun_begin % dev->geo.luns_per_chnl; + int lunid = 0; + int lun_balanced = 1; + int prev_nr_luns; + int i, j; + + nr_chnls = nr_luns / dev->geo.luns_per_chnl; + nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1; + + dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL); + if (!dev_map) + goto err_dev; + + dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map), + GFP_KERNEL); + if (!dev_map->chnls) + goto err_chnls; + + luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL); + if (!luns) + goto err_luns; + + prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ? 
+ dev->geo.luns_per_chnl : luns_left; + for (i = 0; i < nr_chnls; i++) { + struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch]; + int *lun_roffs = ch_rmap->lun_offs; + struct nvm_ch_map *ch_map = &dev_map->chnls[i]; + int *lun_offs; + int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ? + dev->geo.luns_per_chnl : luns_left; + + if (lun_balanced && prev_nr_luns != luns_in_chnl) + lun_balanced = 0; + + ch_map->ch_off = ch_rmap->ch_off = bch; + ch_map->nr_luns = luns_in_chnl; + + lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); + if (!lun_offs) + goto err_ch; + + for (j = 0; j < luns_in_chnl; j++) { + luns[lunid].ppa = 0; + luns[lunid].g.ch = i; + luns[lunid++].g.lun = j; + + lun_offs[j] = blun; + lun_roffs[j + blun] = blun; + } + + ch_map->lun_offs = lun_offs; + + /* when starting a new channel, lun offset is reset */ + blun = 0; + luns_left -= luns_in_chnl; + } + + dev_map->nr_chnls = nr_chnls; + + tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL); + if (!tgt_dev) + goto err_ch; + + memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo)); + /* Target device only owns a portion of the physical device */ + tgt_dev->geo.nr_chnls = nr_chnls; + tgt_dev->geo.nr_luns = nr_luns; + tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1; + tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun; + tgt_dev->q = dev->q; + tgt_dev->map = dev_map; + tgt_dev->luns = luns; + memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id)); + + tgt_dev->parent = dev; + + return tgt_dev; +err_ch: + while (--i > 0) + kfree(dev_map->chnls[i].lun_offs); + kfree(luns); +err_luns: + kfree(dev_map->chnls); +err_chnls: + kfree(dev_map); +err_dev: + return tgt_dev; +} + +static const struct block_device_operations nvm_fops = { + .owner = THIS_MODULE, +}; + +static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) +{ + struct nvm_ioctl_create_simple *s = &create->conf.s; + struct request_queue *tqueue; + struct gendisk *tdisk; + struct nvm_tgt_type *tt; + struct nvm_target *t; + struct nvm_tgt_dev *tgt_dev; + void *targetdata; + + tt = nvm_find_target_type(create->tgttype, 1); + if (!tt) { + pr_err("nvm: target type %s not found\n", create->tgttype); + return -EINVAL; + } + + mutex_lock(&dev->mlock); + t = nvm_find_target(dev, create->tgtname); + if (t) { + pr_err("nvm: target name already exists.\n"); + mutex_unlock(&dev->mlock); + return -EINVAL; + } + mutex_unlock(&dev->mlock); + + if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end)) + return -ENOMEM; + + t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL); + if (!t) + goto err_reserve; + + tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end); + if (!tgt_dev) { + pr_err("nvm: could not create target device\n"); + goto err_t; + } + + tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); + if (!tqueue) + goto err_dev; + blk_queue_make_request(tqueue, tt->make_rq); + + tdisk = alloc_disk(0); + if (!tdisk) + goto err_queue; + + sprintf(tdisk->disk_name, "%s", create->tgtname); + tdisk->flags = GENHD_FL_EXT_DEVT; + tdisk->major = 0; + tdisk->first_minor = 0; + tdisk->fops = &nvm_fops; + tdisk->queue = tqueue; + + targetdata = tt->init(tgt_dev, tdisk); + if (IS_ERR(targetdata)) + goto err_init; + + tdisk->private_data = targetdata; + tqueue->queuedata = targetdata; + + blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); + + set_capacity(tdisk, tt->capacity(targetdata)); + add_disk(tdisk); + + if (tt->sysfs_init && tt->sysfs_init(tdisk)) + goto err_sysfs; + + t->type = tt; + t->disk = tdisk; 
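	/*
	 * Illustrative note: the disk is already live at this point
	 * (add_disk() above), so taking dev->mlock to link the target into
	 * dev->targets is the final step; nvm_remove_tgt() below takes the
	 * same mutex before tearing a target down.
	 */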
+ t->dev = tgt_dev; + + mutex_lock(&dev->mlock); + list_add_tail(&t->list, &dev->targets); + mutex_unlock(&dev->mlock); + + return 0; +err_sysfs: + if (tt->exit) + tt->exit(targetdata); +err_init: + put_disk(tdisk); +err_queue: + blk_cleanup_queue(tqueue); +err_dev: + nvm_remove_tgt_dev(tgt_dev); +err_t: + kfree(t); +err_reserve: + nvm_release_luns_err(dev, s->lun_begin, s->lun_end); + return -ENOMEM; +} + +static void __nvm_remove_target(struct nvm_target *t) +{ + struct nvm_tgt_type *tt = t->type; + struct gendisk *tdisk = t->disk; + struct request_queue *q = tdisk->queue; + + del_gendisk(tdisk); + blk_cleanup_queue(q); + + if (tt->sysfs_exit) + tt->sysfs_exit(tdisk); + + if (tt->exit) + tt->exit(tdisk->private_data); + + nvm_remove_tgt_dev(t->dev); + put_disk(tdisk); + + list_del(&t->list); + kfree(t); +} + +/** + * nvm_remove_tgt - Removes a target from the media manager + * @dev: device + * @remove: ioctl structure with target name to remove. + * + * Returns: + * 0: on success + * 1: on not found + * <0: on error + */ +static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove) +{ + struct nvm_target *t; + + mutex_lock(&dev->mlock); + t = nvm_find_target(dev, remove->tgtname); + if (!t) { + mutex_unlock(&dev->mlock); + return 1; + } + __nvm_remove_target(t); + mutex_unlock(&dev->mlock); + + return 0; +} + +static int nvm_register_map(struct nvm_dev *dev) +{ + struct nvm_dev_map *rmap; + int i, j; + + rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL); + if (!rmap) + goto err_rmap; + + rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map), + GFP_KERNEL); + if (!rmap->chnls) + goto err_chnls; + + for (i = 0; i < dev->geo.nr_chnls; i++) { + struct nvm_ch_map *ch_rmap; + int *lun_roffs; + int luns_in_chnl = dev->geo.luns_per_chnl; + + ch_rmap = &rmap->chnls[i]; + + ch_rmap->ch_off = -1; + ch_rmap->nr_luns = luns_in_chnl; + + lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); + if (!lun_roffs) + goto err_ch; + + for (j = 0; j < luns_in_chnl; j++) + lun_roffs[j] = -1; + + ch_rmap->lun_offs = lun_roffs; + } + + dev->rmap = rmap; + + return 0; +err_ch: + while (--i >= 0) + kfree(rmap->chnls[i].lun_offs); +err_chnls: + kfree(rmap); +err_rmap: + return -ENOMEM; +} + +static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) +{ + struct nvm_dev_map *dev_map = tgt_dev->map; + struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch]; + int lun_off = ch_map->lun_offs[p->g.lun]; + + p->g.ch += ch_map->ch_off; + p->g.lun += lun_off; +} + +static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) +{ + struct nvm_dev *dev = tgt_dev->parent; + struct nvm_dev_map *dev_rmap = dev->rmap; + struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch]; + int lun_roff = ch_rmap->lun_offs[p->g.lun]; + + p->g.ch -= ch_rmap->ch_off; + p->g.lun -= lun_roff; +} + +static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, + struct ppa_addr *ppa_list, int nr_ppas) +{ + int i; + + for (i = 0; i < nr_ppas; i++) { + nvm_map_to_dev(tgt_dev, &ppa_list[i]); + ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]); + } +} + +static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, + struct ppa_addr *ppa_list, int nr_ppas) +{ + int i; + + for (i = 0; i < nr_ppas; i++) { + ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]); + nvm_map_to_tgt(tgt_dev, &ppa_list[i]); + } +} + +static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) +{ + if (rqd->nr_ppas == 1) { + nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1); + return; + } 
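	/*
	 * Illustrative note: struct nvm_rq carries one embedded ppa_addr for
	 * the common single-sector case and switches to the ppa_list only
	 * when nr_ppas > 1; nvm_rq_dev_to_tgt() below mirrors this for the
	 * completion path. Worked example, assuming luns_per_chnl = 4: a
	 * target created over device luns 4..7 gets ch_off = 1 and all
	 * lun_offs[] = 0, so target (ch 0, lun 2) translates to device
	 * (ch 1, lun 2).
	 */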
+ + nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas); +} + +static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) +{ + if (rqd->nr_ppas == 1) { + nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1); + return; + } + + nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas); +} + +void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries, + int len) +{ + struct nvm_geo *geo = &dev->geo; + struct nvm_dev_map *dev_rmap = dev->rmap; + u64 i; + + for (i = 0; i < len; i++) { + struct nvm_ch_map *ch_rmap; + int *lun_roffs; + struct ppa_addr gaddr; + u64 pba = le64_to_cpu(entries[i]); + int off; + u64 diff; + + if (!pba) + continue; + + gaddr = linear_to_generic_addr(geo, pba); + ch_rmap = &dev_rmap->chnls[gaddr.g.ch]; + lun_roffs = ch_rmap->lun_offs; + + off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun; + + diff = ((ch_rmap->ch_off * geo->luns_per_chnl) + + (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun; + + entries[i] -= cpu_to_le64(diff); + } +} +EXPORT_SYMBOL(nvm_part_to_tgt); + struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock) { struct nvm_tgt_type *tmp, *tt = NULL; @@ -92,78 +565,6 @@ void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler) } EXPORT_SYMBOL(nvm_dev_dma_free); -static struct nvmm_type *nvm_find_mgr_type(const char *name) -{ - struct nvmm_type *mt; - - list_for_each_entry(mt, &nvm_mgrs, list) - if (!strcmp(name, mt->name)) - return mt; - - return NULL; -} - -static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev) -{ - struct nvmm_type *mt; - int ret; - - lockdep_assert_held(&nvm_lock); - - list_for_each_entry(mt, &nvm_mgrs, list) { - if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN)) - continue; - - ret = mt->register_mgr(dev); - if (ret < 0) { - pr_err("nvm: media mgr failed to init (%d) on dev %s\n", - ret, dev->name); - return NULL; /* initialization failed */ - } else if (ret > 0) - return mt; - } - - return NULL; -} - -int nvm_register_mgr(struct nvmm_type *mt) -{ - struct nvm_dev *dev; - int ret = 0; - - down_write(&nvm_lock); - if (nvm_find_mgr_type(mt->name)) { - ret = -EEXIST; - goto finish; - } else { - list_add(&mt->list, &nvm_mgrs); - } - - /* try to register media mgr if any device have none configured */ - list_for_each_entry(dev, &nvm_devices, devices) { - if (dev->mt) - continue; - - dev->mt = nvm_init_mgr(dev); - } -finish: - up_write(&nvm_lock); - - return ret; -} -EXPORT_SYMBOL(nvm_register_mgr); - -void nvm_unregister_mgr(struct nvmm_type *mt) -{ - if (!mt) - return; - - down_write(&nvm_lock); - list_del(&mt->list); - up_write(&nvm_lock); -} -EXPORT_SYMBOL(nvm_unregister_mgr); - static struct nvm_dev *nvm_find_nvm_dev(const char *name) { struct nvm_dev *dev; @@ -175,53 +576,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name) return NULL; } -static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev, - struct nvm_rq *rqd) -{ - struct nvm_dev *dev = tgt_dev->parent; - int i; - - if (rqd->nr_ppas > 1) { - for (i = 0; i < rqd->nr_ppas; i++) { - rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev, - rqd->ppa_list[i], TRANS_TGT_TO_DEV); - rqd->ppa_list[i] = generic_to_dev_addr(dev, - rqd->ppa_list[i]); - } - } else { - rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr, - TRANS_TGT_TO_DEV); - rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); - } -} - -int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas, - int type) -{ - struct nvm_rq rqd; - int ret; - - if (nr_ppas > dev->ops->max_phys_sect) { - pr_err("nvm: unable to update all 
sysblocks atomically\n"); - return -EINVAL; - } - - memset(&rqd, 0, sizeof(struct nvm_rq)); - - nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); - nvm_generic_to_addr_mode(dev, &rqd); - - ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); - nvm_free_rqd_ppalist(dev, &rqd); - if (ret) { - pr_err("nvm: sysblk failed bb mark\n"); - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL(nvm_set_bb_tbl); - int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int nr_ppas, int type) { @@ -237,12 +591,12 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, memset(&rqd, 0, sizeof(struct nvm_rq)); nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); - nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd); + nvm_rq_tgt_to_dev(tgt_dev, &rqd); ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type); nvm_free_rqd_ppalist(dev, &rqd); if (ret) { - pr_err("nvm: sysblk failed bb mark\n"); + pr_err("nvm: failed bb mark\n"); return -EINVAL; } @@ -262,15 +616,42 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) { struct nvm_dev *dev = tgt_dev->parent; - return dev->mt->submit_io(tgt_dev, rqd); + if (!dev->ops->submit_io) + return -ENODEV; + + nvm_rq_tgt_to_dev(tgt_dev, rqd); + + rqd->dev = tgt_dev; + return dev->ops->submit_io(dev, rqd); } EXPORT_SYMBOL(nvm_submit_io); -int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags) +int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags) { struct nvm_dev *dev = tgt_dev->parent; + struct nvm_rq rqd; + int ret; + + if (!dev->ops->erase_block) + return 0; + + nvm_map_to_dev(tgt_dev, ppas); + + memset(&rqd, 0, sizeof(struct nvm_rq)); + + ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1); + if (ret) + return ret; + + nvm_rq_tgt_to_dev(tgt_dev, &rqd); + + rqd.flags = flags; + + ret = dev->ops->erase_block(dev, &rqd); - return dev->mt->erase_blk(tgt_dev, p, flags); + nvm_free_rqd_ppalist(dev, &rqd); + + return ret; } EXPORT_SYMBOL(nvm_erase_blk); @@ -289,46 +670,67 @@ EXPORT_SYMBOL(nvm_get_l2p_tbl); int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len) { struct nvm_dev *dev = tgt_dev->parent; + struct nvm_geo *geo = &dev->geo; + struct nvm_area *area, *prev, *next; + sector_t begin = 0; + sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9; - return dev->mt->get_area(dev, lba, len); -} -EXPORT_SYMBOL(nvm_get_area); + if (len > max_sectors) + return -EINVAL; -void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba) -{ - struct nvm_dev *dev = tgt_dev->parent; + area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL); + if (!area) + return -ENOMEM; - dev->mt->put_area(dev, lba); -} -EXPORT_SYMBOL(nvm_put_area); + prev = NULL; -void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) -{ - int i; + spin_lock(&dev->lock); + list_for_each_entry(next, &dev->area_list, list) { + if (begin + len > next->begin) { + begin = next->end; + prev = next; + continue; + } + break; + } - if (rqd->nr_ppas > 1) { - for (i = 0; i < rqd->nr_ppas; i++) - rqd->ppa_list[i] = dev_to_generic_addr(dev, - rqd->ppa_list[i]); - } else { - rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); + if ((begin + len) > max_sectors) { + spin_unlock(&dev->lock); + kfree(area); + return -EINVAL; } + + area->begin = *lba = begin; + area->end = begin + len; + + if (prev) /* insert into sorted order */ + list_add(&area->list, &prev->list); + else + list_add(&area->list, &dev->area_list); + spin_unlock(&dev->lock); + + return 0; } 
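/*
 * Illustrative sketch, not part of the patch: how a target might use the
 * area allocator above (assumes <linux/lightnvm.h>; the function name and
 * the 2048-sector length are made up). nvm_get_area() returns the begin
 * LBA of a free region carved from the device's sorted area_list, and
 * nvm_put_area() releases it by that same begin LBA.
 */
static int example_area_usage(struct nvm_tgt_dev *tgt_dev)
{
	sector_t lba;
	int ret;

	/* reserve 2048 sectors (1 MiB at 512 B) from the device */
	ret = nvm_get_area(tgt_dev, &lba, 2048);
	if (ret)
		return ret;

	/* ... persist target metadata into [lba, lba + 2048) ... */

	/* release the reservation by its begin LBA */
	nvm_put_area(tgt_dev, lba);
	return 0;
}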
-EXPORT_SYMBOL(nvm_addr_to_generic_mode); +EXPORT_SYMBOL(nvm_get_area); -void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) +void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin) { - int i; + struct nvm_dev *dev = tgt_dev->parent; + struct nvm_area *area; - if (rqd->nr_ppas > 1) { - for (i = 0; i < rqd->nr_ppas; i++) - rqd->ppa_list[i] = generic_to_dev_addr(dev, - rqd->ppa_list[i]); - } else { - rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); + spin_lock(&dev->lock); + list_for_each_entry(area, &dev->area_list, list) { + if (area->begin != begin) + continue; + + list_del(&area->list); + spin_unlock(&dev->lock); + kfree(area); + return; } + spin_unlock(&dev->lock); } -EXPORT_SYMBOL(nvm_generic_to_addr_mode); +EXPORT_SYMBOL(nvm_put_area); int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, const struct ppa_addr *ppas, int nr_ppas, int vblk) @@ -380,149 +782,19 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) } EXPORT_SYMBOL(nvm_free_rqd_ppalist); -int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas, - int flags) +void nvm_end_io(struct nvm_rq *rqd) { - struct nvm_rq rqd; - int ret; + struct nvm_tgt_dev *tgt_dev = rqd->dev; - if (!dev->ops->erase_block) - return 0; + /* Convert address space */ + if (tgt_dev) + nvm_rq_dev_to_tgt(tgt_dev, rqd); - memset(&rqd, 0, sizeof(struct nvm_rq)); - - ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1); - if (ret) - return ret; - - nvm_generic_to_addr_mode(dev, &rqd); - - rqd.flags = flags; - - ret = dev->ops->erase_block(dev, &rqd); - - nvm_free_rqd_ppalist(dev, &rqd); - - return ret; -} -EXPORT_SYMBOL(nvm_erase_ppa); - -void nvm_end_io(struct nvm_rq *rqd, int error) -{ - rqd->error = error; - rqd->end_io(rqd); + if (rqd->end_io) + rqd->end_io(rqd); } EXPORT_SYMBOL(nvm_end_io); -static void nvm_end_io_sync(struct nvm_rq *rqd) -{ - struct completion *waiting = rqd->wait; - - rqd->wait = NULL; - - complete(waiting); -} - -static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode, - int flags, void *buf, int len) -{ - DECLARE_COMPLETION_ONSTACK(wait); - struct bio *bio; - int ret; - unsigned long hang_check; - - bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL); - if (IS_ERR_OR_NULL(bio)) - return -ENOMEM; - - nvm_generic_to_addr_mode(dev, rqd); - - rqd->dev = NULL; - rqd->opcode = opcode; - rqd->flags = flags; - rqd->bio = bio; - rqd->wait = &wait; - rqd->end_io = nvm_end_io_sync; - - ret = dev->ops->submit_io(dev, rqd); - if (ret) { - bio_put(bio); - return ret; - } - - /* Prevent hang_check timer from firing at us during very long I/O */ - hang_check = sysctl_hung_task_timeout_secs; - if (hang_check) - while (!wait_for_completion_io_timeout(&wait, - hang_check * (HZ/2))) - ; - else - wait_for_completion_io(&wait); - - return rqd->error; -} - -/** - * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must - * take to free ppa list if necessary. 
- * @dev: device - * @ppa_list: user created ppa_list - * @nr_ppas: length of ppa_list - * @opcode: device opcode - * @flags: device flags - * @buf: data buffer - * @len: data buffer length - */ -int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list, - int nr_ppas, int opcode, int flags, void *buf, int len) -{ - struct nvm_rq rqd; - - if (dev->ops->max_phys_sect < nr_ppas) - return -EINVAL; - - memset(&rqd, 0, sizeof(struct nvm_rq)); - - rqd.nr_ppas = nr_ppas; - if (nr_ppas > 1) - rqd.ppa_list = ppa_list; - else - rqd.ppa_addr = ppa_list[0]; - - return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len); -} -EXPORT_SYMBOL(nvm_submit_ppa_list); - -/** - * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded - * as single, dual, quad plane PPAs depending on device type. - * @dev: device - * @ppa: user created ppa_list - * @nr_ppas: length of ppa_list - * @opcode: device opcode - * @flags: device flags - * @buf: data buffer - * @len: data buffer length - */ -int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, - int opcode, int flags, void *buf, int len) -{ - struct nvm_rq rqd; - int ret; - - memset(&rqd, 0, sizeof(struct nvm_rq)); - ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1); - if (ret) - return ret; - - ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len); - - nvm_free_rqd_ppalist(dev, &rqd); - - return ret; -} -EXPORT_SYMBOL(nvm_submit_ppa); - /* * folds a bad block list from its plane representation to its virtual * block representation. The fold is done in place and reduced size is @@ -559,21 +831,14 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks) } EXPORT_SYMBOL(nvm_bb_tbl_fold); -int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks) -{ - ppa = generic_to_dev_addr(dev, ppa); - - return dev->ops->get_bb_tbl(dev, ppa, blks); -} -EXPORT_SYMBOL(nvm_get_bb_tbl); - int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa, u8 *blks) { struct nvm_dev *dev = tgt_dev->parent; - ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV); - return nvm_get_bb_tbl(dev, ppa, blks); + nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1); + + return dev->ops->get_bb_tbl(dev, ppa, blks); } EXPORT_SYMBOL(nvm_get_tgt_bb_tbl); @@ -627,7 +892,7 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; - struct nvm_id_group *grp = &id->groups[0]; + struct nvm_id_group *grp = &id->grp; struct nvm_geo *geo = &dev->geo; int ret; @@ -691,36 +956,31 @@ static int nvm_core_init(struct nvm_dev *dev) goto err_fmtype; } + INIT_LIST_HEAD(&dev->area_list); + INIT_LIST_HEAD(&dev->targets); mutex_init(&dev->mlock); spin_lock_init(&dev->lock); - blk_queue_logical_block_size(dev->q, geo->sec_size); + ret = nvm_register_map(dev); + if (ret) + goto err_fmtype; + blk_queue_logical_block_size(dev->q, geo->sec_size); return 0; err_fmtype: kfree(dev->lun_map); return ret; } -static void nvm_free_mgr(struct nvm_dev *dev) -{ - if (!dev->mt) - return; - - dev->mt->unregister_mgr(dev); - dev->mt = NULL; -} - void nvm_free(struct nvm_dev *dev) { if (!dev) return; - nvm_free_mgr(dev); - if (dev->dma_pool) dev->ops->destroy_dma_pool(dev->dma_pool); + kfree(dev->rmap); kfree(dev->lptbl); kfree(dev->lun_map); kfree(dev); @@ -731,28 +991,19 @@ static int nvm_init(struct nvm_dev *dev) struct nvm_geo *geo = &dev->geo; int ret = -EINVAL; - if (!dev->q || !dev->ops) - return ret; - if (dev->ops->identity(dev, &dev->identity)) { 
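		/*
		 * Illustrative note: ->identity() is the first step of
		 * bring-up; nvm_core_init() below derives the exposed
		 * geometry (dev->geo) from the returned id->grp, so nothing
		 * can proceed without it.
		 */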
pr_err("nvm: device could not be identified\n"); goto err; } - pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n", - dev->identity.ver_id, dev->identity.vmnt, - dev->identity.cgrps); + pr_debug("nvm: ver:%x nvm_vendor:%x\n", + dev->identity.ver_id, dev->identity.vmnt); if (dev->identity.ver_id != 1) { pr_err("nvm: device not supported by kernel."); goto err; } - if (dev->identity.cgrps != 1) { - pr_err("nvm: only one group configuration supported."); - goto err; - } - ret = nvm_core_init(dev); if (ret) { pr_err("nvm: could not initialize core structures.\n"); @@ -779,49 +1030,50 @@ int nvm_register(struct nvm_dev *dev) { int ret; - ret = nvm_init(dev); - if (ret) - goto err_init; + if (!dev->q || !dev->ops) + return -EINVAL; if (dev->ops->max_phys_sect > 256) { pr_info("nvm: max sectors supported is 256.\n"); - ret = -EINVAL; - goto err_init; + return -EINVAL; } if (dev->ops->max_phys_sect > 1) { dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist"); if (!dev->dma_pool) { pr_err("nvm: could not create dma pool\n"); - ret = -ENOMEM; - goto err_init; + return -ENOMEM; } } - if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { - ret = nvm_get_sysblock(dev, &dev->sb); - if (!ret) - pr_err("nvm: device not initialized.\n"); - else if (ret < 0) - pr_err("nvm: err (%d) on device initialization\n", ret); - } + ret = nvm_init(dev); + if (ret) + goto err_init; /* register device with a supported media manager */ down_write(&nvm_lock); - if (ret > 0) - dev->mt = nvm_init_mgr(dev); list_add(&dev->devices, &nvm_devices); up_write(&nvm_lock); return 0; err_init: - kfree(dev->lun_map); + dev->ops->destroy_dma_pool(dev->dma_pool); return ret; } EXPORT_SYMBOL(nvm_register); void nvm_unregister(struct nvm_dev *dev) { + struct nvm_target *t, *tmp; + + mutex_lock(&dev->mlock); + list_for_each_entry_safe(t, tmp, &dev->targets, list) { + if (t->dev->parent != dev) + continue; + __nvm_remove_target(t); + } + mutex_unlock(&dev->mlock); + down_write(&nvm_lock); list_del(&dev->devices); up_write(&nvm_lock); @@ -844,24 +1096,24 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) return -EINVAL; } - if (!dev->mt) { - pr_info("nvm: device has no media manager registered.\n"); - return -ENODEV; - } - if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) { pr_err("nvm: config type not valid\n"); return -EINVAL; } s = &create->conf.s; - if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) { + if (s->lun_begin == -1 && s->lun_end == -1) { + s->lun_begin = 0; + s->lun_end = dev->geo.nr_luns - 1; + } + + if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) { pr_err("nvm: lun out of bound (%u:%u > %u)\n", - s->lun_begin, s->lun_end, dev->geo.nr_luns); + s->lun_begin, s->lun_end, dev->geo.nr_luns - 1); return -EINVAL; } - return dev->mt->create_tgt(dev, create); + return nvm_create_tgt(dev, create); } static long nvm_ioctl_info(struct file *file, void __user *arg) @@ -923,16 +1175,14 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) struct nvm_ioctl_device_info *info = &devices->info[i]; sprintf(info->devname, "%s", dev->name); - if (dev->mt) { - info->bmversion[0] = dev->mt->version[0]; - info->bmversion[1] = dev->mt->version[1]; - info->bmversion[2] = dev->mt->version[2]; - sprintf(info->bmname, "%s", dev->mt->name); - } else { - sprintf(info->bmname, "none"); - } + /* kept for compatibility */ + info->bmversion[0] = 1; + info->bmversion[1] = 0; + info->bmversion[2] = 0; + sprintf(info->bmname, "%s", "gennvm"); i++; + if (i > 31) { pr_err("nvm: max 31 devices can be 
reported.\n"); break; @@ -994,7 +1244,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) } list_for_each_entry(dev, &nvm_devices, devices) { - ret = dev->mt->remove_tgt(dev, &remove); + ret = nvm_remove_tgt(dev, &remove); if (!ret) break; } @@ -1002,47 +1252,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) return ret; } -static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info) -{ - info->seqnr = 1; - info->erase_cnt = 0; - info->version = 1; -} - -static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init) -{ - struct nvm_dev *dev; - struct nvm_sb_info info; - int ret; - - down_write(&nvm_lock); - dev = nvm_find_nvm_dev(init->dev); - up_write(&nvm_lock); - if (!dev) { - pr_err("nvm: device not found\n"); - return -EINVAL; - } - - nvm_setup_nvm_sb_info(&info); - - strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); - info.fs_ppa.ppa = -1; - - if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { - ret = nvm_init_sysblock(dev, &info); - if (ret) - return ret; - } - - memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); - - down_write(&nvm_lock); - dev->mt = nvm_init_mgr(dev); - up_write(&nvm_lock); - - return 0; -} - +/* kept for compatibility reasons */ static long nvm_ioctl_dev_init(struct file *file, void __user *arg) { struct nvm_ioctl_dev_init init; @@ -1058,15 +1268,13 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg) return -EINVAL; } - init.dev[DISK_NAME_LEN - 1] = '\0'; - - return __nvm_ioctl_dev_init(&init); + return 0; } +/* Kept for compatibility reasons */ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) { struct nvm_ioctl_dev_factory fact; - struct nvm_dev *dev; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1079,19 +1287,6 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1)) return -EINVAL; - down_write(&nvm_lock); - dev = nvm_find_nvm_dev(fact.dev); - up_write(&nvm_lock); - if (!dev) { - pr_err("nvm: device not found\n"); - return -EINVAL; - } - - nvm_free_mgr(dev); - - if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) - return nvm_dev_factory(dev, fact.flags); - return 0; } diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c deleted file mode 100644 index ca7880082d80..000000000000 --- a/drivers/lightnvm/gennvm.c +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Copyright (C) 2015 Matias Bjorling <m@bjorling.me> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, - * USA. - * - * Implementation of a general nvm manager for Open-Channel SSDs. 
- */ - -#include "gennvm.h" - -static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name) -{ - struct nvm_target *tgt; - - list_for_each_entry(tgt, &gn->targets, list) - if (!strcmp(name, tgt->disk->disk_name)) - return tgt; - - return NULL; -} - -static const struct block_device_operations gen_fops = { - .owner = THIS_MODULE, -}; - -static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t, - int lun_begin, int lun_end) -{ - int i; - - for (i = lun_begin; i <= lun_end; i++) { - if (test_and_set_bit(i, dev->lun_map)) { - pr_err("nvm: lun %d already allocated\n", i); - goto err; - } - } - - return 0; - -err: - while (--i > lun_begin) - clear_bit(i, dev->lun_map); - - return -EBUSY; -} - -static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin, - int lun_end) -{ - int i; - - for (i = lun_begin; i <= lun_end; i++) - WARN_ON(!test_and_clear_bit(i, dev->lun_map)); -} - -static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev) -{ - struct nvm_dev *dev = tgt_dev->parent; - struct gen_dev_map *dev_map = tgt_dev->map; - int i, j; - - for (i = 0; i < dev_map->nr_chnls; i++) { - struct gen_ch_map *ch_map = &dev_map->chnls[i]; - int *lun_offs = ch_map->lun_offs; - int ch = i + ch_map->ch_off; - - for (j = 0; j < ch_map->nr_luns; j++) { - int lun = j + lun_offs[j]; - int lunid = (ch * dev->geo.luns_per_chnl) + lun; - - WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); - } - - kfree(ch_map->lun_offs); - } - - kfree(dev_map->chnls); - kfree(dev_map); - kfree(tgt_dev->luns); - kfree(tgt_dev); -} - -static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev, - int lun_begin, int lun_end) -{ - struct nvm_tgt_dev *tgt_dev = NULL; - struct gen_dev_map *dev_rmap = dev->rmap; - struct gen_dev_map *dev_map; - struct ppa_addr *luns; - int nr_luns = lun_end - lun_begin + 1; - int luns_left = nr_luns; - int nr_chnls = nr_luns / dev->geo.luns_per_chnl; - int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl; - int bch = lun_begin / dev->geo.luns_per_chnl; - int blun = lun_begin % dev->geo.luns_per_chnl; - int lunid = 0; - int lun_balanced = 1; - int prev_nr_luns; - int i, j; - - nr_chnls = nr_luns / dev->geo.luns_per_chnl; - nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1; - - dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL); - if (!dev_map) - goto err_dev; - - dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map), - GFP_KERNEL); - if (!dev_map->chnls) - goto err_chnls; - - luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL); - if (!luns) - goto err_luns; - - prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ? - dev->geo.luns_per_chnl : luns_left; - for (i = 0; i < nr_chnls; i++) { - struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch]; - int *lun_roffs = ch_rmap->lun_offs; - struct gen_ch_map *ch_map = &dev_map->chnls[i]; - int *lun_offs; - int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ? 
- dev->geo.luns_per_chnl : luns_left; - - if (lun_balanced && prev_nr_luns != luns_in_chnl) - lun_balanced = 0; - - ch_map->ch_off = ch_rmap->ch_off = bch; - ch_map->nr_luns = luns_in_chnl; - - lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); - if (!lun_offs) - goto err_ch; - - for (j = 0; j < luns_in_chnl; j++) { - luns[lunid].ppa = 0; - luns[lunid].g.ch = i; - luns[lunid++].g.lun = j; - - lun_offs[j] = blun; - lun_roffs[j + blun] = blun; - } - - ch_map->lun_offs = lun_offs; - - /* when starting a new channel, lun offset is reset */ - blun = 0; - luns_left -= luns_in_chnl; - } - - dev_map->nr_chnls = nr_chnls; - - tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL); - if (!tgt_dev) - goto err_ch; - - memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo)); - /* Target device only owns a portion of the physical device */ - tgt_dev->geo.nr_chnls = nr_chnls; - tgt_dev->geo.nr_luns = nr_luns; - tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1; - tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun; - tgt_dev->q = dev->q; - tgt_dev->map = dev_map; - tgt_dev->luns = luns; - memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id)); - - tgt_dev->parent = dev; - - return tgt_dev; -err_ch: - while (--i > 0) - kfree(dev_map->chnls[i].lun_offs); - kfree(luns); -err_luns: - kfree(dev_map->chnls); -err_chnls: - kfree(dev_map); -err_dev: - return tgt_dev; -} - -static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) -{ - struct gen_dev *gn = dev->mp; - struct nvm_ioctl_create_simple *s = &create->conf.s; - struct request_queue *tqueue; - struct gendisk *tdisk; - struct nvm_tgt_type *tt; - struct nvm_target *t; - struct nvm_tgt_dev *tgt_dev; - void *targetdata; - - tt = nvm_find_target_type(create->tgttype, 1); - if (!tt) { - pr_err("nvm: target type %s not found\n", create->tgttype); - return -EINVAL; - } - - mutex_lock(&gn->lock); - t = gen_find_target(gn, create->tgtname); - if (t) { - pr_err("nvm: target name already exists.\n"); - mutex_unlock(&gn->lock); - return -EINVAL; - } - mutex_unlock(&gn->lock); - - t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL); - if (!t) - return -ENOMEM; - - if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end)) - goto err_t; - - tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end); - if (!tgt_dev) { - pr_err("nvm: could not create target device\n"); - goto err_reserve; - } - - tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); - if (!tqueue) - goto err_dev; - blk_queue_make_request(tqueue, tt->make_rq); - - tdisk = alloc_disk(0); - if (!tdisk) - goto err_queue; - - sprintf(tdisk->disk_name, "%s", create->tgtname); - tdisk->flags = GENHD_FL_EXT_DEVT; - tdisk->major = 0; - tdisk->first_minor = 0; - tdisk->fops = &gen_fops; - tdisk->queue = tqueue; - - targetdata = tt->init(tgt_dev, tdisk); - if (IS_ERR(targetdata)) - goto err_init; - - tdisk->private_data = targetdata; - tqueue->queuedata = targetdata; - - blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); - - set_capacity(tdisk, tt->capacity(targetdata)); - add_disk(tdisk); - - t->type = tt; - t->disk = tdisk; - t->dev = tgt_dev; - - mutex_lock(&gn->lock); - list_add_tail(&t->list, &gn->targets); - mutex_unlock(&gn->lock); - - return 0; -err_init: - put_disk(tdisk); -err_queue: - blk_cleanup_queue(tqueue); -err_dev: - kfree(tgt_dev); -err_reserve: - gen_release_luns_err(dev, s->lun_begin, s->lun_end); -err_t: - kfree(t); - return -ENOMEM; -} - -static void __gen_remove_target(struct nvm_target *t) -{ - struct nvm_tgt_type 
*tt = t->type; - struct gendisk *tdisk = t->disk; - struct request_queue *q = tdisk->queue; - - del_gendisk(tdisk); - blk_cleanup_queue(q); - - if (tt->exit) - tt->exit(tdisk->private_data); - - gen_remove_tgt_dev(t->dev); - put_disk(tdisk); - - list_del(&t->list); - kfree(t); -} - -/** - * gen_remove_tgt - Removes a target from the media manager - * @dev: device - * @remove: ioctl structure with target name to remove. - * - * Returns: - * 0: on success - * 1: on not found - * <0: on error - */ -static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove) -{ - struct gen_dev *gn = dev->mp; - struct nvm_target *t; - - if (!gn) - return 1; - - mutex_lock(&gn->lock); - t = gen_find_target(gn, remove->tgtname); - if (!t) { - mutex_unlock(&gn->lock); - return 1; - } - __gen_remove_target(t); - mutex_unlock(&gn->lock); - - return 0; -} - -static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) -{ - struct nvm_geo *geo = &dev->geo; - struct gen_dev *gn = dev->mp; - struct gen_area *area, *prev, *next; - sector_t begin = 0; - sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9; - - if (len > max_sectors) - return -EINVAL; - - area = kmalloc(sizeof(struct gen_area), GFP_KERNEL); - if (!area) - return -ENOMEM; - - prev = NULL; - - spin_lock(&dev->lock); - list_for_each_entry(next, &gn->area_list, list) { - if (begin + len > next->begin) { - begin = next->end; - prev = next; - continue; - } - break; - } - - if ((begin + len) > max_sectors) { - spin_unlock(&dev->lock); - kfree(area); - return -EINVAL; - } - - area->begin = *lba = begin; - area->end = begin + len; - - if (prev) /* insert into sorted order */ - list_add(&area->list, &prev->list); - else - list_add(&area->list, &gn->area_list); - spin_unlock(&dev->lock); - - return 0; -} - -static void gen_put_area(struct nvm_dev *dev, sector_t begin) -{ - struct gen_dev *gn = dev->mp; - struct gen_area *area; - - spin_lock(&dev->lock); - list_for_each_entry(area, &gn->area_list, list) { - if (area->begin != begin) - continue; - - list_del(&area->list); - spin_unlock(&dev->lock); - kfree(area); - return; - } - spin_unlock(&dev->lock); -} - -static void gen_free(struct nvm_dev *dev) -{ - kfree(dev->mp); - kfree(dev->rmap); - dev->mp = NULL; -} - -static int gen_register(struct nvm_dev *dev) -{ - struct gen_dev *gn; - struct gen_dev_map *dev_rmap; - int i, j; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - - gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL); - if (!gn) - goto err_gn; - - dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL); - if (!dev_rmap) - goto err_rmap; - - dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map), - GFP_KERNEL); - if (!dev_rmap->chnls) - goto err_chnls; - - for (i = 0; i < dev->geo.nr_chnls; i++) { - struct gen_ch_map *ch_rmap; - int *lun_roffs; - int luns_in_chnl = dev->geo.luns_per_chnl; - - ch_rmap = &dev_rmap->chnls[i]; - - ch_rmap->ch_off = -1; - ch_rmap->nr_luns = luns_in_chnl; - - lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); - if (!lun_roffs) - goto err_ch; - - for (j = 0; j < luns_in_chnl; j++) - lun_roffs[j] = -1; - - ch_rmap->lun_offs = lun_roffs; - } - - gn->dev = dev; - gn->nr_luns = dev->geo.nr_luns; - INIT_LIST_HEAD(&gn->area_list); - mutex_init(&gn->lock); - INIT_LIST_HEAD(&gn->targets); - dev->mp = gn; - dev->rmap = dev_rmap; - - return 1; -err_ch: - while (--i >= 0) - kfree(dev_rmap->chnls[i].lun_offs); -err_chnls: - kfree(dev_rmap); -err_rmap: - gen_free(dev); -err_gn: - module_put(THIS_MODULE); - return 
-ENOMEM; -} - -static void gen_unregister(struct nvm_dev *dev) -{ - struct gen_dev *gn = dev->mp; - struct nvm_target *t, *tmp; - - mutex_lock(&gn->lock); - list_for_each_entry_safe(t, tmp, &gn->targets, list) { - if (t->dev->parent != dev) - continue; - __gen_remove_target(t); - } - mutex_unlock(&gn->lock); - - gen_free(dev); - module_put(THIS_MODULE); -} - -static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) -{ - struct gen_dev_map *dev_map = tgt_dev->map; - struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch]; - int lun_off = ch_map->lun_offs[p->g.lun]; - struct nvm_dev *dev = tgt_dev->parent; - struct gen_dev_map *dev_rmap = dev->rmap; - struct gen_ch_map *ch_rmap; - int lun_roff; - - p->g.ch += ch_map->ch_off; - p->g.lun += lun_off; - - ch_rmap = &dev_rmap->chnls[p->g.ch]; - lun_roff = ch_rmap->lun_offs[p->g.lun]; - - if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) { - pr_err("nvm: corrupted device partition table\n"); - return -EINVAL; - } - - return 0; -} - -static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) -{ - struct nvm_dev *dev = tgt_dev->parent; - struct gen_dev_map *dev_rmap = dev->rmap; - struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch]; - int lun_roff = ch_rmap->lun_offs[p->g.lun]; - - p->g.ch -= ch_rmap->ch_off; - p->g.lun -= lun_roff; - - return 0; -} - -static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, - int flag) -{ - gen_trans_fn *f; - int i; - int ret = 0; - - f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt; - - if (rqd->nr_ppas == 1) - return f(tgt_dev, &rqd->ppa_addr); - - for (i = 0; i < rqd->nr_ppas; i++) { - ret = f(tgt_dev, &rqd->ppa_list[i]); - if (ret) - goto out; - } - -out: - return ret; -} - -static void gen_end_io(struct nvm_rq *rqd) -{ - struct nvm_tgt_dev *tgt_dev = rqd->dev; - struct nvm_tgt_instance *ins = rqd->ins; - - /* Convert address space */ - if (tgt_dev) - gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT); - - ins->tt->end_io(rqd); -} - -static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) -{ - struct nvm_dev *dev = tgt_dev->parent; - - if (!dev->ops->submit_io) - return -ENODEV; - - /* Convert address space */ - gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV); - nvm_generic_to_addr_mode(dev, rqd); - - rqd->dev = tgt_dev; - rqd->end_io = gen_end_io; - return dev->ops->submit_io(dev, rqd); -} - -static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, - int flags) -{ - /* Convert address space */ - gen_map_to_dev(tgt_dev, p); - - return nvm_erase_ppa(tgt_dev->parent, p, 1, flags); -} - -static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev, - struct ppa_addr p, int direction) -{ - gen_trans_fn *f; - struct ppa_addr ppa = p; - - f = (direction == TRANS_TGT_TO_DEV) ? 
gen_map_to_dev : gen_map_to_tgt; - f(tgt_dev, &ppa); - - return ppa; -} - -static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries, - int len) -{ - struct nvm_geo *geo = &dev->geo; - struct gen_dev_map *dev_rmap = dev->rmap; - u64 i; - - for (i = 0; i < len; i++) { - struct gen_ch_map *ch_rmap; - int *lun_roffs; - struct ppa_addr gaddr; - u64 pba = le64_to_cpu(entries[i]); - int off; - u64 diff; - - if (!pba) - continue; - - gaddr = linear_to_generic_addr(geo, pba); - ch_rmap = &dev_rmap->chnls[gaddr.g.ch]; - lun_roffs = ch_rmap->lun_offs; - - off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun; - - diff = ((ch_rmap->ch_off * geo->luns_per_chnl) + - (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun; - - entries[i] -= cpu_to_le64(diff); - } -} - -static struct nvmm_type gen = { - .name = "gennvm", - .version = {0, 1, 0}, - - .register_mgr = gen_register, - .unregister_mgr = gen_unregister, - - .create_tgt = gen_create_tgt, - .remove_tgt = gen_remove_tgt, - - .submit_io = gen_submit_io, - .erase_blk = gen_erase_blk, - - .get_area = gen_get_area, - .put_area = gen_put_area, - - .trans_ppa = gen_trans_ppa, - .part_to_tgt = gen_part_to_tgt, -}; - -static int __init gen_module_init(void) -{ - return nvm_register_mgr(&gen); -} - -static void gen_module_exit(void) -{ - nvm_unregister_mgr(&gen); -} - -module_init(gen_module_init); -module_exit(gen_module_exit); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("General media manager for Open-Channel SSDs"); diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h deleted file mode 100644 index 6a4b3f368848..000000000000 --- a/drivers/lightnvm/gennvm.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright: Matias Bjorling <mb@bjorling.me> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - */ - -#ifndef GENNVM_H_ -#define GENNVM_H_ - -#include <linux/module.h> -#include <linux/vmalloc.h> - -#include <linux/lightnvm.h> - -struct gen_dev { - struct nvm_dev *dev; - - int nr_luns; - struct list_head area_list; - - struct mutex lock; - struct list_head targets; -}; - -/* Map between virtual and physical channel and lun */ -struct gen_ch_map { - int ch_off; - int nr_luns; - int *lun_offs; -}; - -struct gen_dev_map { - struct gen_ch_map *chnls; - int nr_chnls; -}; - -struct gen_area { - struct list_head list; - sector_t begin; - sector_t end; /* end is excluded */ -}; - -static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map) -{ - return ch_map + 1; -} - -typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *); - -#define gen_for_each_lun(bm, lun, i) \ - for ((i) = 0, lun = &(bm)->luns[0]; \ - (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)]) - -#endif /* GENNVM_H_ */ diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 9fb7de395915..e00b1d7b976f 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -779,7 +779,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, static void rrpc_end_io(struct nvm_rq *rqd) { - struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); + struct rrpc *rrpc = rqd->private; struct nvm_tgt_dev *dev = rrpc->dev; struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); uint8_t npages = rqd->nr_ppas; @@ -972,8 +972,9 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, bio_get(bio); rqd->bio = bio; - rqd->ins = &rrpc->instance; + rqd->private = rrpc; rqd->nr_ppas = nr_pages; + rqd->end_io = rrpc_end_io; rrq->flags = flags; err = nvm_submit_io(dev, rqd); @@ -1532,7 +1533,6 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk) if (!rrpc) return ERR_PTR(-ENOMEM); - rrpc->instance.tt = &tt_rrpc; rrpc->dev = dev; rrpc->disk = tdisk; @@ -1611,7 +1611,6 @@ static struct nvm_tgt_type tt_rrpc = { .make_rq = rrpc_make_rq, .capacity = rrpc_capacity, - .end_io = rrpc_end_io, .init = rrpc_init, .exit = rrpc_exit, diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 94e4d73116b2..fdb6ff902903 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -102,9 +102,6 @@ struct rrpc_lun { }; struct rrpc { - /* instance must be kept in top to resolve rrpc in unprep */ - struct nvm_tgt_instance instance; - struct nvm_tgt_dev *dev; struct gendisk *disk; diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c deleted file mode 100644 index 12002bf4efc2..000000000000 --- a/drivers/lightnvm/sysblk.c +++ /dev/null @@ -1,733 +0,0 @@ -/* - * Copyright (C) 2015 Matias Bjorling. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, - * USA. 
- * - */ - -#include <linux/lightnvm.h> - -#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */ -#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases - * enables ~1.5M updates per sysblk unit - */ - -struct sysblk_scan { - /* A row is a collection of flash blocks for a system block. */ - int nr_rows; - int row; - int act_blk[MAX_SYSBLKS]; - - int nr_ppas; - struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */ -}; - -static inline int scan_ppa_idx(int row, int blkid) -{ - return (row * MAX_BLKS_PR_SYSBLK) + blkid; -} - -static void nvm_sysblk_to_cpu(struct nvm_sb_info *info, - struct nvm_system_block *sb) -{ - info->seqnr = be32_to_cpu(sb->seqnr); - info->erase_cnt = be32_to_cpu(sb->erase_cnt); - info->version = be16_to_cpu(sb->version); - strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN); - info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa); -} - -static void nvm_cpu_to_sysblk(struct nvm_system_block *sb, - struct nvm_sb_info *info) -{ - sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC); - sb->seqnr = cpu_to_be32(info->seqnr); - sb->erase_cnt = cpu_to_be32(info->erase_cnt); - sb->version = cpu_to_be16(info->version); - strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN); - sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa); -} - -static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas) -{ - struct nvm_geo *geo = &dev->geo; - int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls); - int i; - - for (i = 0; i < nr_rows; i++) - sysblk_ppas[i].ppa = 0; - - /* if possible, place sysblk at first channel, middle channel and last - * channel of the device. If not, create only one or two sys blocks - */ - switch (geo->nr_chnls) { - case 2: - sysblk_ppas[1].g.ch = 1; - /* fall-through */ - case 1: - sysblk_ppas[0].g.ch = 0; - break; - default: - sysblk_ppas[0].g.ch = 0; - sysblk_ppas[1].g.ch = geo->nr_chnls / 2; - sysblk_ppas[2].g.ch = geo->nr_chnls - 1; - break; - } - - return nr_rows; -} - -static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, - struct ppa_addr *sysblk_ppas) -{ - memset(s, 0, sizeof(struct sysblk_scan)); - s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas); -} - -static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa, - u8 *blks, int nr_blks, - struct sysblk_scan *s) -{ - struct ppa_addr *sppa; - int i, blkid = 0; - - nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); - if (nr_blks < 0) - return nr_blks; - - for (i = 0; i < nr_blks; i++) { - if (blks[i] == NVM_BLK_T_HOST) - return -EEXIST; - - if (blks[i] != NVM_BLK_T_FREE) - continue; - - sppa = &s->ppas[scan_ppa_idx(s->row, blkid)]; - sppa->g.ch = ppa.g.ch; - sppa->g.lun = ppa.g.lun; - sppa->g.blk = i; - s->nr_ppas++; - blkid++; - - pr_debug("nvm: use (%u %u %u) as sysblk\n", - sppa->g.ch, sppa->g.lun, sppa->g.blk); - if (blkid > MAX_BLKS_PR_SYSBLK - 1) - return 0; - } - - pr_err("nvm: sysblk failed get sysblk\n"); - return -EINVAL; -} - -static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa, - u8 *blks, int nr_blks, - struct sysblk_scan *s) -{ - int i, nr_sysblk = 0; - - nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); - if (nr_blks < 0) - return nr_blks; - - for (i = 0; i < nr_blks; i++) { - if (blks[i] != NVM_BLK_T_HOST) - continue; - - if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) { - pr_err("nvm: too many host blks\n"); - return -EINVAL; - } - - ppa.g.blk = i; - - s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa; - s->nr_ppas++; - nr_sysblk++; - } - - return 0; -} - -static int nvm_get_all_sysblks(struct nvm_dev *dev, 
struct sysblk_scan *s, - struct ppa_addr *ppas, int get_free) -{ - struct nvm_geo *geo = &dev->geo; - int i, nr_blks, ret = 0; - u8 *blks; - - s->nr_ppas = 0; - nr_blks = geo->blks_per_lun * geo->plane_mode; - - blks = kmalloc(nr_blks, GFP_KERNEL); - if (!blks) - return -ENOMEM; - - for (i = 0; i < s->nr_rows; i++) { - s->row = i; - - ret = nvm_get_bb_tbl(dev, ppas[i], blks); - if (ret) { - pr_err("nvm: failed bb tbl for ppa (%u %u)\n", - ppas[i].g.ch, - ppas[i].g.blk); - goto err_get; - } - - if (get_free) - ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks, - s); - else - ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks, - s); - - if (ret) - goto err_get; - } - -err_get: - kfree(blks); - return ret; -} - -/* - * scans a block for latest sysblk. - * Returns: - * 0 - newer sysblk not found. PPA is updated to latest page. - * 1 - newer sysblk found and stored in *cur. PPA is updated to - * next valid page. - * <0- error. - */ -static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa, - struct nvm_system_block *sblk) -{ - struct nvm_geo *geo = &dev->geo; - struct nvm_system_block *cur; - int pg, ret, found = 0; - - /* the full buffer for a flash page is allocated. Only the first of it - * contains the system block information - */ - cur = kmalloc(geo->pfpg_size, GFP_KERNEL); - if (!cur) - return -ENOMEM; - - /* perform linear scan through the block */ - for (pg = 0; pg < dev->lps_per_blk; pg++) { - ppa->g.pg = ppa_to_slc(dev, pg); - - ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE, - cur, geo->pfpg_size); - if (ret) { - if (ret == NVM_RSP_ERR_EMPTYPAGE) { - pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n", - ppa->g.ch, - ppa->g.lun, - ppa->g.blk, - ppa->g.pg); - break; - } - pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)", - ret, - ppa->g.ch, - ppa->g.lun, - ppa->g.blk, - ppa->g.pg); - break; /* if we can't read a page, continue to the - * next blk - */ - } - - if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) { - pr_debug("nvm: scan break for ppa (%u %u %u %u)\n", - ppa->g.ch, - ppa->g.lun, - ppa->g.blk, - ppa->g.pg); - break; /* last valid page already found */ - } - - if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr)) - continue; - - memcpy(sblk, cur, sizeof(struct nvm_system_block)); - found = 1; - } - - kfree(cur); - - return found; -} - -static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, - int type) -{ - return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type); -} - -static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, - struct sysblk_scan *s) -{ - struct nvm_geo *geo = &dev->geo; - struct nvm_system_block nvmsb; - void *buf; - int i, sect, ret = 0; - struct ppa_addr *ppas; - - nvm_cpu_to_sysblk(&nvmsb, info); - - buf = kzalloc(geo->pfpg_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - memcpy(buf, &nvmsb, sizeof(struct nvm_system_block)); - - ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL); - if (!ppas) { - ret = -ENOMEM; - goto err; - } - - /* Write and verify */ - for (i = 0; i < s->nr_rows; i++) { - ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])]; - - pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n", - ppas[0].g.ch, - ppas[0].g.lun, - ppas[0].g.blk, - ppas[0].g.pg); - - /* Expand to all sectors within a flash page */ - if (geo->sec_per_pg > 1) { - for (sect = 1; sect < geo->sec_per_pg; sect++) { - ppas[sect].ppa = ppas[0].ppa; - ppas[sect].g.sec = sect; - } - } - - ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE, - NVM_IO_SLC_MODE, buf, 
geo->pfpg_size); - if (ret) { - pr_err("nvm: sysblk failed program (%u %u %u)\n", - ppas[0].g.ch, - ppas[0].g.lun, - ppas[0].g.blk); - break; - } - - ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD, - NVM_IO_SLC_MODE, buf, geo->pfpg_size); - if (ret) { - pr_err("nvm: sysblk failed read (%u %u %u)\n", - ppas[0].g.ch, - ppas[0].g.lun, - ppas[0].g.blk); - break; - } - - if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) { - pr_err("nvm: sysblk failed verify (%u %u %u)\n", - ppas[0].g.ch, - ppas[0].g.lun, - ppas[0].g.blk); - ret = -EINVAL; - break; - } - } - - kfree(ppas); -err: - kfree(buf); - - return ret; -} - -static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s) -{ - int i, ret; - unsigned long nxt_blk; - struct ppa_addr *ppa; - - for (i = 0; i < s->nr_rows; i++) { - nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK; - ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)]; - ppa->g.pg = ppa_to_slc(dev, 0); - - ret = nvm_erase_ppa(dev, ppa, 1, 0); - if (ret) - return ret; - - s->act_blk[i] = nxt_blk; - } - - return 0; -} - -int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) -{ - struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; - struct sysblk_scan s; - struct nvm_system_block *cur; - int i, j, found = 0; - int ret = -ENOMEM; - - /* - * 1. setup sysblk locations - * 2. get bad block list - * 3. filter on host-specific (type 3) - * 4. iterate through all and find the highest seq nr. - * 5. return superblock information - */ - - if (!dev->ops->get_bb_tbl) - return -EINVAL; - - nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); - - mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); - if (ret) - goto err_sysblk; - - /* no sysblocks initialized */ - if (!s.nr_ppas) - goto err_sysblk; - - cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); - if (!cur) - goto err_sysblk; - - /* find the latest block across all sysblocks */ - for (i = 0; i < s.nr_rows; i++) { - for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { - struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)]; - - ret = nvm_scan_block(dev, &ppa, cur); - if (ret > 0) - found = 1; - else if (ret < 0) - break; - } - } - - nvm_sysblk_to_cpu(info, cur); - - kfree(cur); -err_sysblk: - mutex_unlock(&dev->mlock); - - if (found) - return 1; - return ret; -} - -int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new) -{ - /* 1. for each latest superblock - * 2. if room - * a. write new flash page entry with the updated information - * 3. if no room - * a. find next available block on lun (linear search) - * if none, continue to next lun - * if none at all, report error. also report that it wasn't - * possible to write to all superblocks. - * c. write data to block. 
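 *
 * (Illustrative note: the seqnr check further down enforces this ordering;
 * an update is only written when new->seqnr is exactly cur->seqnr + 1,
 * i.e. superblock updates must be strictly sequential.)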
- */ - struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; - struct sysblk_scan s; - struct nvm_system_block *cur; - int i, j, ppaidx, found = 0; - int ret = -ENOMEM; - - if (!dev->ops->get_bb_tbl) - return -EINVAL; - - nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); - - mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); - if (ret) - goto err_sysblk; - - cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); - if (!cur) - goto err_sysblk; - - /* Get the latest sysblk for each sysblk row */ - for (i = 0; i < s.nr_rows; i++) { - found = 0; - for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { - ppaidx = scan_ppa_idx(i, j); - ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur); - if (ret > 0) { - s.act_blk[i] = j; - found = 1; - } else if (ret < 0) - break; - } - } - - if (!found) { - pr_err("nvm: no valid sysblks found to update\n"); - ret = -EINVAL; - goto err_cur; - } - - /* - * All sysblocks found. Check that they have same page id in their flash - * blocks - */ - for (i = 1; i < s.nr_rows; i++) { - struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])]; - struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])]; - - if (l.g.pg != r.g.pg) { - pr_err("nvm: sysblks not on same page. Previous update failed.\n"); - ret = -EINVAL; - goto err_cur; - } - } - - /* - * Check that there haven't been another update to the seqnr since we - * began - */ - if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) { - pr_err("nvm: seq is not sequential\n"); - ret = -EINVAL; - goto err_cur; - } - - /* - * When all pages in a block has been written, a new block is selected - * and writing is performed on the new block. - */ - if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg == - dev->lps_per_blk - 1) { - ret = nvm_prepare_new_sysblks(dev, &s); - if (ret) - goto err_cur; - } - - ret = nvm_write_and_verify(dev, new, &s); -err_cur: - kfree(cur); -err_sysblk: - mutex_unlock(&dev->mlock); - - return ret; -} - -int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; - struct sysblk_scan s; - int ret; - - /* - * 1. select master blocks and select first available blks - * 2. get bad block list - * 3. mark MAX_SYSBLKS block as host-based device allocated. - * 4. 
write and verify data to block - */ - - if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl) - return -EINVAL; - - if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) { - pr_err("nvm: memory does not support SLC access\n"); - return -EINVAL; - } - - /* Index all sysblocks and mark them as host-driven */ - nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); - - mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1); - if (ret) - goto err_mark; - - ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST); - if (ret) - goto err_mark; - - /* Write to the first block of each row */ - ret = nvm_write_and_verify(dev, info, &s); -err_mark: - mutex_unlock(&dev->mlock); - return ret; -} - -static int factory_nblks(int nblks) -{ - /* Round up to nearest BITS_PER_LONG */ - return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); -} - -static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa) -{ - int nblks = factory_nblks(geo->blks_per_lun); - - return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) / - BITS_PER_LONG; -} - -static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa, - u8 *blks, int nr_blks, - unsigned long *blk_bitmap, int flags) -{ - int i, lunoff; - - nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks); - if (nr_blks < 0) - return nr_blks; - - lunoff = factory_blk_offset(&dev->geo, ppa); - - /* non-set bits correspond to the block must be erased */ - for (i = 0; i < nr_blks; i++) { - switch (blks[i]) { - case NVM_BLK_T_FREE: - if (flags & NVM_FACTORY_ERASE_ONLY_USER) - set_bit(i, &blk_bitmap[lunoff]); - break; - case NVM_BLK_T_HOST: - if (!(flags & NVM_FACTORY_RESET_HOST_BLKS)) - set_bit(i, &blk_bitmap[lunoff]); - break; - case NVM_BLK_T_GRWN_BAD: - if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS)) - set_bit(i, &blk_bitmap[lunoff]); - break; - default: - set_bit(i, &blk_bitmap[lunoff]); - break; - } - } - - return 0; -} - -static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list, - int max_ppas, unsigned long *blk_bitmap) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr ppa; - int ch, lun, blkid, idx, done = 0, ppa_cnt = 0; - unsigned long *offset; - - while (!done) { - done = 1; - nvm_for_each_lun_ppa(geo, ppa, ch, lun) { - idx = factory_blk_offset(geo, ppa); - offset = &blk_bitmap[idx]; - - blkid = find_first_zero_bit(offset, geo->blks_per_lun); - if (blkid >= geo->blks_per_lun) - continue; - set_bit(blkid, offset); - - ppa.g.blk = blkid; - pr_debug("nvm: erase ppa (%u %u %u)\n", - ppa.g.ch, - ppa.g.lun, - ppa.g.blk); - - erase_list[ppa_cnt] = ppa; - ppa_cnt++; - done = 0; - - if (ppa_cnt == max_ppas) - return ppa_cnt; - } - } - - return ppa_cnt; -} - -static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap, - int flags) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr ppa; - int ch, lun, nr_blks, ret = 0; - u8 *blks; - - nr_blks = geo->blks_per_lun * geo->plane_mode; - blks = kmalloc(nr_blks, GFP_KERNEL); - if (!blks) - return -ENOMEM; - - nvm_for_each_lun_ppa(geo, ppa, ch, lun) { - ret = nvm_get_bb_tbl(dev, ppa, blks); - if (ret) - pr_err("nvm: failed bb tbl for ch%u lun%u\n", - ppa.g.ch, ppa.g.blk); - - ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap, - flags); - if (ret) - break; - } - - kfree(blks); - return ret; -} - -int nvm_dev_factory(struct nvm_dev *dev, int flags) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr *ppas; - int ppa_cnt, ret = -ENOMEM; - int max_ppas = dev->ops->max_phys_sect / geo->nr_planes; - struct ppa_addr 
sysblk_ppas[MAX_SYSBLKS]; - struct sysblk_scan s; - unsigned long *blk_bitmap; - - blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns, - GFP_KERNEL); - if (!blk_bitmap) - return ret; - - ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL); - if (!ppas) - goto err_blks; - - /* create list of blks to be erased */ - ret = nvm_fact_select_blks(dev, blk_bitmap, flags); - if (ret) - goto err_ppas; - - /* continue to erase until list of blks until empty */ - while ((ppa_cnt = - nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0) - nvm_erase_ppa(dev, ppas, ppa_cnt, 0); - - /* mark host reserved blocks free */ - if (flags & NVM_FACTORY_RESET_HOST_BLKS) { - nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); - mutex_lock(&dev->mlock); - ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0); - if (!ret) - ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE); - mutex_unlock(&dev->mlock); - } -err_ppas: - kfree(ppas); -err_blks: - kfree(blk_bitmap); - return ret; -} -EXPORT_SYMBOL(nvm_dev_factory); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 76d20875503c..709c9cc34369 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio, s->iop.write_prio = 0; s->iop.error = 0; s->iop.flags = 0; - s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0; + s->iop.flush_journal = op_is_flush(bio->bi_opf); s->iop.wq = bcache_wq; return s; @@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits) struct request_queue *q = bdev_get_queue(dc->bdev); int ret = 0; - if (bdi_congested(&q->backing_dev_info, bits)) + if (bdi_congested(q->backing_dev_info, bits)) return 1; if (cached_dev_get(dc)) { @@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } cached_dev_put(dc); @@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc) struct gendisk *g = dc->disk.disk; g->queue->make_request_fn = cached_dev_make_request; - g->queue->backing_dev_info.congested_fn = cached_dev_congested; + g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; } @@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits) for_each_cache(ca, d->c, i) { q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; @@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d) struct gendisk *g = d->disk; g->queue->make_request_fn = flash_dev_make_request; - g->queue->backing_dev_info.congested_fn = flash_dev_congested; + g->queue->backing_dev_info->congested_fn = flash_dev_congested; d->cache_miss = flash_dev_cache_miss; d->ioctl = flash_dev_ioctl; } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 3a19cbc8b230..85e3f21c2514 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, blk_queue_make_request(q, NULL); d->disk->queue = q; q->queuedata = d; - q->backing_dev_info.congested_data = d; + q->backing_dev_info->congested_data = d; q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; 
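
The bcache hunks above, like the dm hunks further down, replace the open-coded (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0 test with the op_is_flush() helper introduced earlier in this series. A minimal standalone sketch of the equivalence follows; the helper body matches the kernel's blk_types.h definition, but the flag bit positions here are illustrative placeholders rather than the kernel's actual values:

#include <stdbool.h>
#include <stdio.h>

/* illustrative placeholders; the kernel defines the real values in
 * include/linux/blk_types.h */
#define REQ_PREFLUSH (1u << 5)
#define REQ_FUA      (1u << 6)

/* same body as the kernel helper these hunks switch callers to */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

int main(void)
{
	unsigned int opf = REQ_PREFLUSH;

	/* equivalent to the open-coded test being removed:
	 * (opf & (REQ_PREFLUSH | REQ_FUA)) != 0 */
	printf("needs flush handling: %d\n", op_is_flush(opf) ? 1 : 0);
	return 0;
}

Both forms treat a bio carrying either a preflush or a forced-unit-access hint as needing flush/journal handling; the helper simply centralizes the mask so each driver stops repeating it.
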
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) set_capacity(dc->disk.disk, dc->bdev->bd_part->nr_sects - dc->sb.data_offset); - dc->disk.disk->queue->backing_dev_info.ra_pages = - max(dc->disk.disk->queue->backing_dev_info.ra_pages, - q->backing_dev_info.ra_pages); + dc->disk.disk->queue->backing_dev_info->ra_pages = + max(dc->disk.disk->queue->backing_dev_info->ra_pages, + q->backing_dev_info->ra_pages); bch_cached_dev_request_init(dc); bch_cached_dev_writeback_init(dc); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index e04c61e0839e..894bc14469c8 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); spin_lock_irqsave(&cache->lock, flags); - if (cache->need_tick_bio && - !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) && + if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && bio_op(bio) != REQ_OP_DISCARD) { pb->tick = true; cache->need_tick_bio = false; @@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) return to_oblock(block_nr); } -static int bio_triggers_commit(struct cache *cache, struct bio *bio) -{ - return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA); -} - /* * You must increment the deferred set whilst the prison cell is held. To * encourage this, we ask for 'cell' to be passed in. @@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio) { unsigned long flags; - if (!bio_triggers_commit(cache, bio)) { + if (!op_is_flush(bio->bi_opf)) { accounted_request(cache, bio); return; } @@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache) static bool discard_or_flush(struct bio *bio) { - return bio_op(bio) == REQ_OP_DISCARD || - bio->bi_opf & (REQ_PREFLUSH | REQ_FUA); + return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) @@ -2291,7 +2284,7 @@ static void do_waker(struct work_struct *ws) static int is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 40ceba1fe8be..136fda3ff9e5 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -92,7 +92,6 @@ struct mapped_device { * io objects are allocated from here. 
*/ mempool_t *io_pool; - mempool_t *rq_pool; struct bio_set *bs; diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index bf2b2676cb8a..9fab33b113c4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era) static int dev_is_congested(struct dm_dev *dev, int bdi_bits) { struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 3570bcb7a4a4..7f223dbed49f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -92,12 +92,6 @@ struct multipath { unsigned queue_mode; - /* - * We must use a mempool of dm_mpath_io structs so that we - * can resubmit bios on error. - */ - mempool_t *mpio_pool; - struct mutex work_mutex; struct work_struct trigger_event; @@ -115,8 +109,6 @@ struct dm_mpath_io { typedef int (*action_fn) (struct pgpath *pgpath); -static struct kmem_cache *_mpio_cache; - static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); @@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti) init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); - m->mpio_pool = NULL; m->queue_mode = DM_TYPE_NONE; m->ti = ti; @@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; else m->queue_mode = DM_TYPE_REQUEST_BASED; - } - - if (m->queue_mode == DM_TYPE_REQUEST_BASED) { - unsigned min_ios = dm_get_reserved_rq_based_ios(); - - m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); - if (!m->mpio_pool) - return -ENOMEM; - } - else if (m->queue_mode == DM_TYPE_BIO_BASED) { + } else if (m->queue_mode == DM_TYPE_BIO_BASED) { INIT_WORK(&m->process_queued_bios, process_queued_bios); /* * bio-based doesn't support any direct scsi_dh management; @@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m) kfree(m->hw_handler_name); kfree(m->hw_handler_params); - mempool_destroy(m->mpio_pool); kfree(m); } @@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info) return info->ptr; } -static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info) -{ - struct dm_mpath_io *mpio; - - if (!m->mpio_pool) { - /* Use blk-mq pdu memory requested via per_io_data_size */ - mpio = get_mpio(info); - memset(mpio, 0, sizeof(*mpio)); - return mpio; - } - - mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); - if (!mpio) - return NULL; - - memset(mpio, 0, sizeof(*mpio)); - info->ptr = mpio; - - return mpio; -} - -static void clear_request_fn_mpio(struct multipath *m, union map_info *info) -{ - /* Only needed for non blk-mq (.request_fn) multipath */ - if (m->mpio_pool) { - struct dm_mpath_io *mpio = info->ptr; - - info->ptr = NULL; - mempool_free(mpio, m->mpio_pool); - } -} - static size_t multipath_per_bio_data_size(void) { return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details); @@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m) /* * Map cloned requests (request-based multipath) */ -static int __multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context, - struct request *rq, struct request **__clone) +static int multipath_clone_and_map(struct 
dm_target *ti, struct request *rq, + union map_info *map_context, + struct request **__clone) { struct multipath *m = ti->private; int r = DM_MAPIO_REQUEUE; - size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); + size_t nr_bytes = blk_rq_bytes(rq); struct pgpath *pgpath; struct block_device *bdev; - struct dm_mpath_io *mpio; + struct dm_mpath_io *mpio = get_mpio(map_context); + struct request *clone; /* Do we need to select a new pgpath? */ pgpath = lockless_dereference(m->current_pgpath); @@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, return r; } - mpio = set_mpio(m, map_context); - if (!mpio) - /* ENOMEM, requeue */ - return r; - + memset(mpio, 0, sizeof(*mpio)); mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; bdev = pgpath->path.dev->bdev; - if (clone) { - /* - * Old request-based interface: allocated clone is passed in. - * Used by: .request_fn stacked on .request_fn path(s). - */ - clone->q = bdev_get_queue(bdev); - clone->rq_disk = bdev->bd_disk; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - } else { - /* - * blk-mq request-based interface; used by both: - * .request_fn stacked on blk-mq path(s) and - * blk-mq stacked on blk-mq path(s). - */ - clone = blk_mq_alloc_request(bdev_get_queue(bdev), - rq_data_dir(rq), BLK_MQ_REQ_NOWAIT); - if (IS_ERR(clone)) { - /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ - clear_request_fn_mpio(m, map_context); - return r; - } - clone->bio = clone->biotail = NULL; - clone->rq_disk = bdev->bd_disk; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - *__clone = clone; + clone = blk_get_request(bdev_get_queue(bdev), + rq->cmd_flags | REQ_NOMERGE, + GFP_ATOMIC); + if (IS_ERR(clone)) { + /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ + return r; } + clone->bio = clone->biotail = NULL; + clone->rq_disk = bdev->bd_disk; + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; + *__clone = clone; if (pgpath->pg->ps.type->start_io) pgpath->pg->ps.type->start_io(&pgpath->pg->ps, @@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, return DM_MAPIO_REMAPPED; } -static int multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - return __multipath_map(ti, clone, map_context, NULL, NULL); -} - -static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, - union map_info *map_context, - struct request **clone) -{ - return __multipath_map(ti, NULL, map_context, rq, clone); -} - static void multipath_release_clone(struct request *clone) { - blk_mq_free_request(clone); + blk_put_request(clone); } /* @@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->num_write_same_bios = 1; if (m->queue_mode == DM_TYPE_BIO_BASED) ti->per_io_data_size = multipath_per_bio_data_size(); - else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) + else ti->per_io_data_size = sizeof(struct dm_mpath_io); return 0; @@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, if (ps->type->end_io) ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); } - clear_request_fn_mpio(m, map_context); return r; } @@ -2060,7 +1977,6 @@ static struct target_type multipath_target = { .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, - .map_rq = multipath_map, .clone_and_map_rq = multipath_clone_and_map, .release_clone_rq = multipath_release_clone, .rq_end_io = multipath_end_io, @@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void) { int r; - /* allocate a slab for 
the dm_mpath_ios */ - _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); - if (!_mpio_cache) - return -ENOMEM; - r = dm_register_target(&multipath_target); if (r < 0) { DMERR("request-based register failed %d", r); @@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd: bad_alloc_kmultipathd: dm_unregister_target(&multipath_target); bad_register_target: - kmem_cache_destroy(_mpio_cache); - return r; } @@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void) destroy_workqueue(kmultipathd); dm_unregister_target(&multipath_target); - kmem_cache_destroy(_mpio_cache); } module_init(dm_multipath_init); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6e702fc69a83..67d76f21fecd 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q) dm_mq_stop_queue(q); } -static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md, - gfp_t gfp_mask) -{ - return mempool_alloc(md->io_pool, gfp_mask); -} - -static void free_old_rq_tio(struct dm_rq_target_io *tio) -{ - mempool_free(tio, tio->md->io_pool); -} - -static struct request *alloc_old_clone_request(struct mapped_device *md, - gfp_t gfp_mask) -{ - return mempool_alloc(md->rq_pool, gfp_mask); -} - -static void free_old_clone_request(struct mapped_device *md, struct request *rq) -{ - mempool_free(rq, md->rq_pool); -} - /* * Partial completion handling for request-based dm */ @@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone) static struct dm_rq_target_io *tio_from_request(struct request *rq) { - return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); + return blk_mq_rq_to_pdu(rq); } static void rq_end_stats(struct mapped_device *md, struct request *orig) @@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone) -{ - struct dm_rq_target_io *tio = clone->end_io_data; - struct mapped_device *md = tio->md; - - blk_rq_unprep_clone(clone); - - /* - * It is possible for a clone_old_rq() allocated clone to - * get passed in -- it may not yet have a request_queue. - * This is known to occur if the error target replaces - * a multipath target that has a request_fn queue stacked - * on blk-mq queue(s). - */ - if (clone->q && clone->q->mq_ops) - /* stacked on blk-mq queue(s) */ - tio->ti->type->release_clone_rq(clone); - else if (!md->queue->mq_ops) - /* request_fn queue stacked on request_fn queue(s) */ - free_old_clone_request(md, clone); - - if (!md->queue->mq_ops) - free_old_rq_tio(tio); -} - /* * Complete the clone and the original request. * Must be called without clone's queue lock held, @@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error) struct mapped_device *md = tio->md; struct request *rq = tio->orig; - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - rq->errors = clone->errors; - rq->resid_len = clone->resid_len; - - if (rq->sense) - /* - * We are using the sense buffer of the original - * request. - * So setting the length of the sense data is enough. 
- */ - rq->sense_len = clone->sense_len; - } + blk_rq_unprep_clone(clone); + tio->ti->type->release_clone_rq(clone); - free_rq_clone(clone); rq_end_stats(md, rq); if (!rq->q->mq_ops) blk_end_request_all(rq, error); @@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error) rq_completed(md, rw, true); } -static void dm_unprep_request(struct request *rq) -{ - struct dm_rq_target_io *tio = tio_from_request(rq); - struct request *clone = tio->clone; - - if (!rq->q->mq_ops) { - rq->special = NULL; - rq->rq_flags &= ~RQF_DONTPREP; - } - - if (clone) - free_rq_clone(clone); - else if (!tio->md->queue->mq_ops) - free_old_rq_tio(tio); -} - /* * Requeue the original request of a clone. */ @@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ int rw = rq_data_dir(rq); rq_end_stats(md, rq); - dm_unprep_request(rq); + if (tio->clone) { + blk_rq_unprep_clone(tio->clone); + tio->ti->type->release_clone_rq(tio->clone); + } if (!rq->q->mq_ops) dm_old_requeue_request(rq); @@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq) if (!clone) { rq_end_stats(tio->md, rq); rw = rq_data_dir(rq); - if (!rq->q->mq_ops) { + if (!rq->q->mq_ops) blk_end_request_all(rq, tio->error); - rq_completed(tio->md, rw, false); - free_old_rq_tio(tio); - } else { + else blk_mq_end_request(rq, tio->error); - rq_completed(tio->md, rw, false); - } + rq_completed(tio->md, rw, false); return; } @@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error) { struct dm_rq_target_io *tio = clone->end_io_data; - if (!clone->q->mq_ops) { - /* - * For just cleaning up the information of the queue in which - * the clone was dispatched. - * The clone is *NOT* freed actually here because it is alloced - * from dm own mempool (RQF_ALLOCED isn't set). - */ - __blk_put_request(clone->q, clone); - } - /* * Actual request completion is done in a softirq context which doesn't * hold the clone's queue lock. Otherwise, deadlock could occur because: @@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq, if (r) return r; - clone->cmd = rq->cmd; - clone->cmd_len = rq->cmd_len; - clone->sense = rq->sense; clone->end_io = end_clone_request; clone->end_io_data = tio; @@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq, return 0; } -static struct request *clone_old_rq(struct request *rq, struct mapped_device *md, - struct dm_rq_target_io *tio, gfp_t gfp_mask) -{ - /* - * Create clone for use with .request_fn request_queue - */ - struct request *clone; - - clone = alloc_old_clone_request(md, gfp_mask); - if (!clone) - return NULL; - - blk_rq_init(NULL, clone); - if (setup_clone(clone, rq, tio, gfp_mask)) { - /* -ENOMEM */ - free_old_clone_request(md, clone); - return NULL; - } - - return clone; -} - static void map_tio_request(struct kthread_work *work); static void init_tio(struct dm_rq_target_io *tio, struct request *rq, @@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq, kthread_init_work(&tio->work, map_tio_request); } -static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq, - struct mapped_device *md, - gfp_t gfp_mask) -{ - struct dm_rq_target_io *tio; - int srcu_idx; - struct dm_table *table; - - tio = alloc_old_rq_tio(md, gfp_mask); - if (!tio) - return NULL; - - init_tio(tio, rq, md); - - table = dm_get_live_table(md, &srcu_idx); - /* - * Must clone a request if this .request_fn DM device - * is stacked on .request_fn device(s). 
- */ - if (!dm_table_all_blk_mq_devices(table)) { - if (!clone_old_rq(rq, md, tio, gfp_mask)) { - dm_put_live_table(md, srcu_idx); - free_old_rq_tio(tio); - return NULL; - } - } - dm_put_live_table(md, srcu_idx); - - return tio; -} - -/* - * Called with the queue lock held. - */ -static int dm_old_prep_fn(struct request_queue *q, struct request *rq) -{ - struct mapped_device *md = q->queuedata; - struct dm_rq_target_io *tio; - - if (unlikely(rq->special)) { - DMWARN("Already has something in rq->special."); - return BLKPREP_KILL; - } - - tio = dm_old_prep_tio(rq, md, GFP_ATOMIC); - if (!tio) - return BLKPREP_DEFER; - - rq->special = tio; - rq->rq_flags |= RQF_DONTPREP; - - return BLKPREP_OK; -} - /* * Returns: * DM_MAPIO_* : the request has been processed as indicated @@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio) struct request *rq = tio->orig; struct request *clone = NULL; - if (tio->clone) { - clone = tio->clone; - r = ti->type->map_rq(ti, clone, &tio->info); - if (r == DM_MAPIO_DELAY_REQUEUE) - return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */ - } else { - r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); - if (r < 0) { - /* The target wants to complete the I/O */ - dm_kill_unmapped_request(rq, r); - return r; - } - if (r == DM_MAPIO_REMAPPED && - setup_clone(clone, rq, tio, GFP_ATOMIC)) { - /* -ENOMEM */ - ti->type->release_clone_rq(clone); - return DM_MAPIO_REQUEUE; - } - } - + r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); switch (r) { case DM_MAPIO_SUBMITTED: /* The target has taken the I/O to submit by itself later */ break; case DM_MAPIO_REMAPPED: + if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { + /* -ENOMEM */ + ti->type->release_clone_rq(clone); + return DM_MAPIO_REQUEUE; + } + /* The target has remapped the I/O so dispatch it */ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), blk_rq_pos(rq)); @@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig) dm_get(md); } +static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq) +{ + struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); + + /* + * Must initialize md member of tio, otherwise it won't + * be available in dm_mq_queue_rq. + */ + tio->md = md; + + if (md->init_tio_pdu) { + /* target-specific per-io data is immediately after the tio */ + tio->info.ptr = tio + 1; + } + + return 0; +} + +static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) +{ + return __dm_rq_init_rq(q->rq_alloc_data, rq); +} + static void map_tio_request(struct kthread_work *work) { struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); @@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q) dm_start_request(md, rq); tio = tio_from_request(rq); + init_tio(tio, rq, md); /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; kthread_queue_work(&md->kworker, &tio->work); @@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q) /* * Fully initialize a .request_fn request-based queue. 
*/ -int dm_old_init_request_queue(struct mapped_device *md) +int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t) { + struct dm_target *immutable_tgt; + /* Fully initialize the queue */ - if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) + md->queue->cmd_size = sizeof(struct dm_rq_target_io); + md->queue->rq_alloc_data = md; + md->queue->request_fn = dm_old_request_fn; + md->queue->init_rq_fn = dm_rq_init_rq; + + immutable_tgt = dm_table_get_immutable_target(t); + if (immutable_tgt && immutable_tgt->per_io_data_size) { + /* any target-specific per-io data is immediately after the tio */ + md->queue->cmd_size += immutable_tgt->per_io_data_size; + md->init_tio_pdu = true; + } + if (blk_init_allocated_queue(md->queue) < 0) return -EINVAL; /* disable dm_old_request_fn's merge heuristic by default */ @@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md) dm_init_normal_md_queue(md); blk_queue_softirq_done(md->queue, dm_softirq_done); - blk_queue_prep_rq(md->queue, dm_old_prep_fn); /* Initialize the request-based DM worker thread */ kthread_init_worker(&md->kworker); @@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq, unsigned int hctx_idx, unsigned int request_idx, unsigned int numa_node) { - struct mapped_device *md = data; - struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); - - /* - * Must initialize md member of tio, otherwise it won't - * be available in dm_mq_queue_rq. - */ - tio->md = md; - - if (md->init_tio_pdu) { - /* target-specific per-io data is immediately after the tio */ - tio->info.ptr = tio + 1; - } - - return 0; + return __dm_rq_init_rq(data, rq); } static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index 4da06cae7bad..f0020d21b95f 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info { bool dm_use_blk_mq_default(void); bool dm_use_blk_mq(struct mapped_device *md); -int dm_old_init_request_queue(struct mapped_device *md); +int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t); int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t); void dm_mq_cleanup_mapped_device(struct mapped_device *md); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0a427de23ed2..3ad16d9c9d5a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) char b[BDEVNAME_SIZE]; if (likely(q)) - r |= bdi_congested(&q->backing_dev_info, bdi_bits); + r |= bdi_congested(q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: any_congested: nonexistent device %s", dm_device_name(t->md), diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 710ae28fd618..43d3445b121d 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio) return -EIO; } -static int io_err_map_rq(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - return -EIO; -} - static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, union map_info *map_context, struct request **clone) @@ -161,7 +155,6 @@ static struct target_type error_target = { .ctr = io_err_ctr, .dtr = io_err_dtr, .map = io_err_map, - .map_rq = io_err_map_rq, .clone_and_map_rq = io_err_clone_and_map_rq, .release_clone_rq = io_err_release_clone_rq, .direct_access = io_err_direct_access, diff --git 
a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index d1c05c12a9db..2b266a2b5035 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio) static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) { - return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) && + return op_is_flush(bio->bi_opf) && dm_thin_changed_this_transaction(tc->td); } @@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context, struct bio *bio; while ((bio = bio_list_pop(&cell->bios))) { - if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || - bio_op(bio) == REQ_OP_DISCARD) + if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) bio_list_add(&info->defer_bios, bio); else { inc_all_io_entry(info->tc->pool, bio); @@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context, struct bio *bio; while ((bio = bio_list_pop(&cell->bios))) { - if ((bio_data_dir(bio) == WRITE) || - (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || - bio_op(bio) == REQ_OP_DISCARD)) + if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) || + bio_op(bio) == REQ_OP_DISCARD) bio_list_add(&info->defer_bios, bio); else { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));; @@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } - if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) || - bio_op(bio) == REQ_OP_DISCARD) { + if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { thin_defer_bio_with_throttle(tc, bio); return DM_MAPIO_SUBMITTED; } @@ -2714,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) return 1; q = bdev_get_queue(pt->data_dev->bdev); - return bdi_congested(&q->backing_dev_info, bdi_bits); + return bdi_congested(q->backing_dev_info, bdi_bits); } static void requeue_bios(struct pool *pool) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3086da5664f3..5bd9ab06a562 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE; */ struct dm_md_mempools { mempool_t *io_pool; - mempool_t *rq_pool; struct bio_set *bs; }; @@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, if (r > 0) { /* - * Target determined this ioctl is being issued against - * a logical partition of the parent bdev; so extra - * validation is needed. + * Target determined this ioctl is being issued against a + * subset of the parent bdev; require extra privileges. */ - r = scsi_verify_blk_ioctl(NULL, cmd); - if (r) + if (!capable(CAP_SYS_RAWIO)) { + DMWARN_LIMIT( + "%s: sending ioctl %x to DM device without required privilege.", + current->comm, cmd); + r = -ENOIOCTLCMD; goto out; + } } r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); @@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * With request-based DM we only need to check the * top-level queue for congestion. 
*/ - r = md->queue->backing_dev_info.wb.state & bdi_bits; + r = md->queue->backing_dev_info->wb.state & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) @@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md) * - must do so here (in alloc_dev callchain) before queue is used */ md->queue->queuedata = md; - md->queue->backing_dev_info.congested_data = md; + md->queue->backing_dev_info->congested_data = md; } void dm_init_normal_md_queue(struct mapped_device *md) @@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md) /* * Initialize aspects of queue that aren't relevant for blk-mq */ - md->queue->backing_dev_info.congested_fn = dm_any_congested; + md->queue->backing_dev_info->congested_fn = dm_any_congested; blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); } @@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->kworker_task) kthread_stop(md->kworker_task); mempool_destroy(md->io_pool); - mempool_destroy(md->rq_pool); if (md->bs) bioset_free(md->bs); @@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) goto out; } - BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); + BUG_ON(!p || md->io_pool || md->bs); md->io_pool = p->io_pool; p->io_pool = NULL; - md->rq_pool = p->rq_pool; - p->rq_pool = NULL; md->bs = p->bs; p->bs = NULL; @@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) switch (type) { case DM_TYPE_REQUEST_BASED: - r = dm_old_init_request_queue(md); + r = dm_old_init_request_queue(md, t); if (r) { DMERR("Cannot initialize queue for request-based mapped device"); return r; @@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t unsigned integrity, unsigned per_io_data_size) { struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); - struct kmem_cache *cachep = NULL; unsigned int pool_size = 0; unsigned int front_pad; @@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t switch (type) { case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: - cachep = _io_cache; pool_size = dm_get_reserved_bio_based_ios(); front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); + + pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); + if (!pools->io_pool) + goto out; break; case DM_TYPE_REQUEST_BASED: - cachep = _rq_tio_cache; - pool_size = dm_get_reserved_rq_based_ios(); - pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); - if (!pools->rq_pool) - goto out; - /* fall through to setup remaining rq-based pools */ case DM_TYPE_MQ_REQUEST_BASED: - if (!pool_size) - pool_size = dm_get_reserved_rq_based_ios(); + pool_size = dm_get_reserved_rq_based_ios(); front_pad = offsetof(struct dm_rq_clone_bio_info, clone); /* per_io_data_size is used for blk-mq pdu at queue allocation */ break; @@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t BUG(); } - if (cachep) { - pools->io_pool = mempool_create_slab_pool(pool_size, cachep); - if (!pools->io_pool) - goto out; - } - pools->bs = bioset_create_nobvec(pool_size, front_pad); if (!pools->bs) goto out; @@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools) return; mempool_destroy(pools->io_pool); - mempool_destroy(pools->rq_pool); if (pools->bs) bioset_free(pools->bs); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 
f0aad08b9654..f298b01f7ab3 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); /* * To check whether the target type is request-based or not (bio-based). */ -#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \ - ((t)->type->clone_and_map_rq != NULL)) +#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL) /* * To check whether the target type is a hybrid (capable of being diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 5975c9915684..f1c7bbac31a5 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits) for (i = 0; i < mddev->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; diff --git a/drivers/md/md.c b/drivers/md/md.c index 01175dac0db6..ba485dcf1064 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); - mddev->queue->backing_dev_info.congested_data = mddev; - mddev->queue->backing_dev_info.congested_fn = md_congested; + mddev->queue->backing_dev_info->congested_data = mddev; + mddev->queue->backing_dev_info->congested_fn = md_congested; } if (pers->sync_request) { if (mddev->kobj.sd && @@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode, __md_stop_writes(mddev); __md_stop(mddev); - mddev->queue->backing_dev_info.congested_fn = NULL; + mddev->queue->backing_dev_info->congested_fn = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index aa8c4e5c1ee2..d457afa672d5 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); /* Just like multipath_map, we just check the * first available device */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 848365d474f3..d6585239bff2 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits) for (i = 0; i < raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(devlist[i]->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } return ret; } @@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev) */ int stripe = mddev->raid_disks * (mddev->chunk_sectors << 9) / PAGE_SIZE; - if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) - mddev->queue->backing_dev_info.ra_pages = 2* stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2* stripe) + mddev->queue->backing_dev_info->ra_pages = 2* stripe; } dump_zones(mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7b0f647bcccb..830ff2b20346 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits) * non-congested targets, it can be removed */ if ((bits & (1 << WB_async_congested)) || 1) - ret |= 
bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); else - ret &= bdi_congested(&q->backing_dev_info, bits); + ret &= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); @@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, int i, disks; struct bitmap *bitmap = mddev->bitmap; unsigned long flags; - const int op = bio_op(bio); - const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); - const unsigned long do_flush_fua = (bio->bi_opf & - (REQ_PREFLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; struct raid1_plug_cb *plug = NULL; @@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, conf->mirrors[i].rdev->data_offset); mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); + mbio->bi_opf = bio_op(bio) | + (bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA)); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && conf->raid_disks - mddev->degraded > 1) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1920756828df..6bc5c2a85160 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits) if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); - ret |= bdi_congested(&q->backing_dev_info, bits); + ret |= bdi_congested(q->backing_dev_info, bits); } } rcu_read_unlock(); @@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev) * maybe... */ stripe /= conf->geo.near_copies; - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } if (md_integrity_register(mddev)) @@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf) int stripe = conf->geo.raid_disks * ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); stripe /= conf->geo.near_copies; - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } conf->fullsync = 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3c7e106c12a2..6214e699342c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) mddev_suspend(mddev); conf->skip_copy = new; if (new) - mddev->queue->backing_dev_info.capabilities |= + mddev->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; else - mddev->queue->backing_dev_info.capabilities &= + mddev->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; mddev_resume(mddev); } @@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev) int data_disks = conf->previous_raid_disks - conf->max_degraded; int stripe = data_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); - if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + mddev->queue->backing_dev_info->ra_pages = 2 * stripe; chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); @@ 
-7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf) int data_disks = conf->raid_disks - conf->max_degraded; int stripe = data_disks * ((conf->chunk_sectors << 9) / PAGE_SIZE); - if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) - conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) + conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; } } } diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index f3512404bc52..99e651c27fb7 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev, return 0; } -static int msb_prepare_req(struct request_queue *q, struct request *req) -{ - if (req->cmd_type != REQ_TYPE_FS) { - blk_dump_rq_flags(req, "MS unsupported request"); - return BLKPREP_KILL; - } - req->rq_flags |= RQF_DONTPREP; - return BLKPREP_OK; -} - static void msb_submit_req(struct request_queue *q) { struct memstick_dev *card = q->queuedata; @@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card) } msb->queue->queuedata = card; - blk_queue_prep_rq(msb->queue, msb_prepare_req); blk_queue_bounce_limit(msb->queue, limit); blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index fa0746d182ff..c00d8a266878 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card) spin_unlock_irqrestore(&msb->q_lock, flags); } -static int mspro_block_prepare_req(struct request_queue *q, struct request *req) -{ - if (req->cmd_type != REQ_TYPE_FS) { - blk_dump_rq_flags(req, "MSPro unsupported request"); - return BLKPREP_KILL; - } - - req->rq_flags |= RQF_DONTPREP; - - return BLKPREP_OK; -} - static void mspro_block_submit_req(struct request_queue *q) { struct memstick_dev *card = q->queuedata; @@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card) } msb->queue->queuedata = card; - blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); blk_queue_bounce_limit(msb->queue, limit); blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7ee1667acde4..b8c4b2ba7519 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -2320,10 +2320,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, SmpPassthroughReply_t *smprep; smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; - memcpy(req->sense, smprep, sizeof(*smprep)); - req->sense_len = sizeof(*smprep); - req->resid_len = 0; - rsp->resid_len -= smprep->ResponseDataLength; + memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep)); + scsi_req(req)->sense_len = sizeof(*smprep); + scsi_req(req)->resid_len = 0; + scsi_req(rsp)->resid_len -= smprep->ResponseDataLength; } else { printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index a6496d8027bc..033f641eb8b7 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -30,15 +30,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; - /* - * We only like normal block requests and discards. 
- */ - if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && - req_op(req) != REQ_OP_SECURE_ERASE) { - blk_dump_rq_flags(req, "MMC bad request"); - return BLKPREP_KILL; - } - if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index df8a5ef334c0..6b8d5cd7dbf6 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, nsect = blk_rq_cur_bytes(req) >> tr->blkshift; buf = bio_data(req->bio); - if (req->cmd_type != REQ_TYPE_FS) - return -EIO; - if (req_op(req) == REQ_OP_FLUSH) return tr->flush(dev); @@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, get_capacity(req->rq_disk)) return -EIO; - if (req_op(req) == REQ_OP_DISCARD) + switch (req_op(req)) { + case REQ_OP_DISCARD: return tr->discard(dev, block, nsect); - - if (rq_data_dir(req) == READ) { + case REQ_OP_READ: for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->readsect(dev, block, buf)) return -EIO; rq_flush_dcache_pages(req); return 0; - } else { + case REQ_OP_WRITE: if (!tr->writesect) return -EIO; @@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, if (tr->writesect(dev, block, buf)) return -EIO; return 0; + default: + return -EIO; } } diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index d1e6931c132f..c80869e60909 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -323,16 +323,15 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, struct ubiblock *dev = hctx->queue->queuedata; struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); - if (req->cmd_type != REQ_TYPE_FS) + switch (req_op(req)) { + case REQ_OP_READ: + ubi_sgl_init(&pdu->usgl); + queue_work(dev->wq, &pdu->work); + return BLK_MQ_RQ_QUEUE_OK; + default: return BLK_MQ_RQ_QUEUE_ERROR; + } - if (rq_data_dir(req) != READ) - return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */ - - ubi_sgl_init(&pdu->usgl); - queue_work(dev->wq, &pdu->work); - - return BLK_MQ_RQ_QUEUE_OK; } static int ubiblock_init_request(void *data, struct request *req, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 8a3c3e32a704..44a1a257e0b5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -208,18 +208,18 @@ EXPORT_SYMBOL_GPL(nvme_requeue_req); struct request *nvme_alloc_request(struct request_queue *q, struct nvme_command *cmd, unsigned int flags, int qid) { + unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; struct request *req; if (qid == NVME_QID_ANY) { - req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags); + req = blk_mq_alloc_request(q, op, flags); } else { - req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags, + req = blk_mq_alloc_request_hctx(q, op, flags, qid ? 
qid - 1 : 0); } if (IS_ERR(req)) return req; - req->cmd_type = REQ_TYPE_DRV_PRIV; req->cmd_flags |= REQ_FAILFAST_DRIVER; nvme_req(req)->cmd = cmd; @@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns, static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { + unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; struct nvme_dsm_range *range; - unsigned int nr_bytes = blk_rq_bytes(req); + struct bio *bio; - range = kmalloc(sizeof(*range), GFP_ATOMIC); + range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC); if (!range) return BLK_MQ_RQ_QUEUE_BUSY; - range->cattr = cpu_to_le32(0); - range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift); - range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); + __rq_for_each_bio(bio, req) { + u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + + range[n].cattr = cpu_to_le32(0); + range[n].nlb = cpu_to_le32(nlb); + range[n].slba = cpu_to_le64(slba); + n++; + } + + if (WARN_ON_ONCE(n != segments)) { + kfree(range); + return BLK_MQ_RQ_QUEUE_ERROR; + } memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); - cmnd->dsm.nr = 0; + cmnd->dsm.nr = segments - 1; cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); - req->special_vec.bv_len = sizeof(*range); + req->special_vec.bv_len = sizeof(*range) * segments; req->rq_flags |= RQF_SPECIAL_PAYLOAD; return BLK_MQ_RQ_QUEUE_OK; @@ -309,17 +321,27 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req, { int ret = BLK_MQ_RQ_QUEUE_OK; - if (req->cmd_type == REQ_TYPE_DRV_PRIV) + switch (req_op(req)) { + case REQ_OP_DRV_IN: + case REQ_OP_DRV_OUT: memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); - else if (req_op(req) == REQ_OP_FLUSH) + break; + case REQ_OP_FLUSH: nvme_setup_flush(ns, cmd); - else if (req_op(req) == REQ_OP_DISCARD) + break; + case REQ_OP_DISCARD: ret = nvme_setup_discard(ns, req, cmd); - else + break; + case REQ_OP_READ: + case REQ_OP_WRITE: nvme_setup_rw(ns, req, cmd); + break; + default: + WARN_ON_ONCE(1); + return BLK_MQ_RQ_QUEUE_ERROR; + } cmd->common.command_id = req->tag; - return ret; } EXPORT_SYMBOL_GPL(nvme_setup_cmd); @@ -784,6 +806,13 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, return nvme_sg_io(ns, (void __user *)arg); #endif default: +#ifdef CONFIG_NVM + if (ns->ndev) + return nvme_nvm_ioctl(ns, cmd, arg); +#endif + if (is_sed_ioctl(cmd)) + return sed_ioctl(ns->ctrl->opal_dev, cmd, + (void __user *) arg); return -ENOTTY; } } @@ -861,6 +890,9 @@ static void nvme_config_discard(struct nvme_ns *ns) struct nvme_ctrl *ctrl = ns->ctrl; u32 logical_block_size = queue_logical_block_size(ns->queue); + BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < + NVME_DSM_MAX_RANGES); + if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES) ns->queue->limits.discard_zeroes_data = 1; else @@ -869,6 +901,7 @@ static void nvme_config_discard(struct nvme_ns *ns) ns->queue->limits.discard_alignment = logical_block_size; ns->queue->limits.discard_granularity = logical_block_size; blk_queue_max_discard_sectors(ns->queue, UINT_MAX); + blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); } @@ -1051,6 +1084,28 @@ static const struct pr_ops nvme_pr_ops = { .pr_clear = nvme_pr_clear, }; +#ifdef CONFIG_BLK_SED_OPAL +int nvme_sec_submit(void 
*data, u16 spsp, u8 secp, void *buffer, size_t len, + bool send) +{ + struct nvme_ctrl *ctrl = data; + struct nvme_command cmd; + + memset(&cmd, 0, sizeof(cmd)); + if (send) + cmd.common.opcode = nvme_admin_security_send; + else + cmd.common.opcode = nvme_admin_security_recv; + cmd.common.nsid = 0; + cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); + cmd.common.cdw10[1] = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); +} +EXPORT_SYMBOL_GPL(nvme_sec_submit); +#endif /* CONFIG_BLK_SED_OPAL */ + static const struct block_device_operations nvme_fops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, @@ -1230,6 +1285,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) return -EIO; } + ctrl->oacs = le16_to_cpu(id->oacs); ctrl->vid = le16_to_cpu(id->vid); ctrl->oncs = le16_to_cpup(&id->oncs); atomic_set(&ctrl->abort_limit, id->acl + 1); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e65041c640cb..fb51a8de9b29 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1937,7 +1937,7 @@ nvme_fc_complete_rq(struct request *rq) return; } - if (rq->cmd_type == REQ_TYPE_DRV_PRIV) + if (blk_rq_is_passthrough(rq)) error = rq->errors; else error = nvme_error_status(rq->errors); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 588d4a34c083..21cac8523bd8 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -26,6 +26,8 @@ #include <linux/bitops.h> #include <linux/lightnvm.h> #include <linux/vmalloc.h> +#include <linux/sched/sysctl.h> +#include <uapi/linux/lightnvm.h> enum nvme_nvm_admin_opcode { nvme_nvm_admin_identity = 0xe2, @@ -248,50 +250,48 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) { struct nvme_nvm_id_group *src; struct nvm_id_group *dst; - int i, end; - - end = min_t(u32, 4, nvm_id->cgrps); - - for (i = 0; i < end; i++) { - src = &nvme_nvm_id->groups[i]; - dst = &nvm_id->groups[i]; - - dst->mtype = src->mtype; - dst->fmtype = src->fmtype; - dst->num_ch = src->num_ch; - dst->num_lun = src->num_lun; - dst->num_pln = src->num_pln; - - dst->num_pg = le16_to_cpu(src->num_pg); - dst->num_blk = le16_to_cpu(src->num_blk); - dst->fpg_sz = le16_to_cpu(src->fpg_sz); - dst->csecs = le16_to_cpu(src->csecs); - dst->sos = le16_to_cpu(src->sos); - - dst->trdt = le32_to_cpu(src->trdt); - dst->trdm = le32_to_cpu(src->trdm); - dst->tprt = le32_to_cpu(src->tprt); - dst->tprm = le32_to_cpu(src->tprm); - dst->tbet = le32_to_cpu(src->tbet); - dst->tbem = le32_to_cpu(src->tbem); - dst->mpos = le32_to_cpu(src->mpos); - dst->mccap = le32_to_cpu(src->mccap); - - dst->cpar = le16_to_cpu(src->cpar); - - if (dst->fmtype == NVM_ID_FMTYPE_MLC) { - memcpy(dst->lptbl.id, src->lptbl.id, 8); - dst->lptbl.mlc.num_pairs = - le16_to_cpu(src->lptbl.mlc.num_pairs); - - if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) { - pr_err("nvm: number of MLC pairs not supported\n"); - return -EINVAL; - } - memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, - dst->lptbl.mlc.num_pairs); + if (nvme_nvm_id->cgrps != 1) + return -EINVAL; + + src = &nvme_nvm_id->groups[0]; + dst = &nvm_id->grp; + + dst->mtype = src->mtype; + dst->fmtype = src->fmtype; + dst->num_ch = src->num_ch; + dst->num_lun = src->num_lun; + dst->num_pln = src->num_pln; + + dst->num_pg = le16_to_cpu(src->num_pg); + dst->num_blk = le16_to_cpu(src->num_blk); + dst->fpg_sz = le16_to_cpu(src->fpg_sz); + dst->csecs = le16_to_cpu(src->csecs); + dst->sos = 
le16_to_cpu(src->sos); + + dst->trdt = le32_to_cpu(src->trdt); + dst->trdm = le32_to_cpu(src->trdm); + dst->tprt = le32_to_cpu(src->tprt); + dst->tprm = le32_to_cpu(src->tprm); + dst->tbet = le32_to_cpu(src->tbet); + dst->tbem = le32_to_cpu(src->tbem); + dst->mpos = le32_to_cpu(src->mpos); + dst->mccap = le32_to_cpu(src->mccap); + + dst->cpar = le16_to_cpu(src->cpar); + + if (dst->fmtype == NVM_ID_FMTYPE_MLC) { + memcpy(dst->lptbl.id, src->lptbl.id, 8); + dst->lptbl.mlc.num_pairs = + le16_to_cpu(src->lptbl.mlc.num_pairs); + + if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) { + pr_err("nvm: number of MLC pairs not supported\n"); + return -EINVAL; } + + memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, + dst->lptbl.mlc.num_pairs); } return 0; @@ -321,7 +321,6 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) nvm_id->ver_id = nvme_nvm_id->ver_id; nvm_id->vmnt = nvme_nvm_id->vmnt; - nvm_id->cgrps = nvme_nvm_id->cgrps; nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, @@ -372,7 +371,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb, } /* Transform physical address to target address space */ - nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb); + nvm_part_to_tgt(nvmdev, entries, cmd_nlb); if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) { ret = -EINTR; @@ -485,7 +484,8 @@ static void nvme_nvm_end_io(struct request *rq, int error) struct nvm_rq *rqd = rq->end_io_data; rqd->ppa_status = nvme_req(rq)->result.u64; - nvm_end_io(rqd, error); + rqd->error = error; + nvm_end_io(rqd); kfree(nvme_req(rq)->cmd); blk_mq_free_request(rq); @@ -586,6 +586,224 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .max_phys_sect = 64, }; +static void nvme_nvm_end_user_vio(struct request *rq, int error) +{ + struct completion *waiting = rq->end_io_data; + + complete(waiting); +} + +static int nvme_nvm_submit_user_cmd(struct request_queue *q, + struct nvme_ns *ns, + struct nvme_nvm_command *vcmd, + void __user *ubuf, unsigned int bufflen, + void __user *meta_buf, unsigned int meta_len, + void __user *ppa_buf, unsigned int ppa_len, + u32 *result, u64 *status, unsigned int timeout) +{ + bool write = nvme_is_write((struct nvme_command *)vcmd); + struct nvm_dev *dev = ns->ndev; + struct gendisk *disk = ns->disk; + struct request *rq; + struct bio *bio = NULL; + __le64 *ppa_list = NULL; + dma_addr_t ppa_dma; + __le64 *metadata = NULL; + dma_addr_t metadata_dma; + DECLARE_COMPLETION_ONSTACK(wait); + int ret; + + rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0, + NVME_QID_ANY); + if (IS_ERR(rq)) { + ret = -ENOMEM; + goto err_cmd; + } + + rq->timeout = timeout ? 
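/*
 * A note on the init_grps() rework above: the removed loop copied up to
 * four media groups (min_t(u32, 4, nvm_id->cgrps)), but anything with
 * cgrps != 1 is now rejected and the single group lands in nvm_id->grp,
 * apparently because the lightnvm core tracks only one; nvm_id->cgrps
 * itself disappears from nvme_nvm_identity(). Completion reporting
 * changes shape the same way, roughly:
 *
 *   rqd->error = error;
 *   nvm_end_io(rqd);      // formerly nvm_end_io(rqd, error)
 */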
timeout : ADMIN_TIMEOUT; + + rq->cmd_flags &= ~REQ_FAILFAST_DRIVER; + rq->end_io_data = &wait; + + if (ppa_buf && ppa_len) { + ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma); + if (!ppa_list) { + ret = -ENOMEM; + goto err_rq; + } + if (copy_from_user(ppa_list, (void __user *)ppa_buf, + sizeof(u64) * (ppa_len + 1))) { + ret = -EFAULT; + goto err_ppa; + } + vcmd->ph_rw.spba = cpu_to_le64(ppa_dma); + } else { + vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf); + } + + if (ubuf && bufflen) { + ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL); + if (ret) + goto err_ppa; + bio = rq->bio; + + if (meta_buf && meta_len) { + metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, + &metadata_dma); + if (!metadata) { + ret = -ENOMEM; + goto err_map; + } + + if (write) { + if (copy_from_user(metadata, + (void __user *)meta_buf, + meta_len)) { + ret = -EFAULT; + goto err_meta; + } + } + vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); + } + + if (!disk) + goto submit; + + bio->bi_bdev = bdget_disk(disk, 0); + if (!bio->bi_bdev) { + ret = -ENODEV; + goto err_meta; + } + } + +submit: + blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio); + + wait_for_completion_io(&wait); + + ret = nvme_error_status(rq->errors); + if (result) + *result = rq->errors & 0x7ff; + if (status) + *status = le64_to_cpu(nvme_req(rq)->result.u64); + + if (metadata && !ret && !write) { + if (copy_to_user(meta_buf, (void *)metadata, meta_len)) + ret = -EFAULT; + } +err_meta: + if (meta_buf && meta_len) + dma_pool_free(dev->dma_pool, metadata, metadata_dma); +err_map: + if (bio) { + if (disk && bio->bi_bdev) + bdput(bio->bi_bdev); + blk_rq_unmap_user(bio); + } +err_ppa: + if (ppa_buf && ppa_len) + dma_pool_free(dev->dma_pool, ppa_list, ppa_dma); +err_rq: + blk_mq_free_request(rq); +err_cmd: + return ret; +} + +static int nvme_nvm_submit_vio(struct nvme_ns *ns, + struct nvm_user_vio __user *uvio) +{ + struct nvm_user_vio vio; + struct nvme_nvm_command c; + unsigned int length; + int ret; + + if (copy_from_user(&vio, uvio, sizeof(vio))) + return -EFAULT; + if (vio.flags) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.ph_rw.opcode = vio.opcode; + c.ph_rw.nsid = cpu_to_le32(ns->ns_id); + c.ph_rw.control = cpu_to_le16(vio.control); + c.ph_rw.length = cpu_to_le16(vio.nppas); + + length = (vio.nppas + 1) << ns->lba_shift; + + ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c, + (void __user *)(uintptr_t)vio.addr, length, + (void __user *)(uintptr_t)vio.metadata, + vio.metadata_len, + (void __user *)(uintptr_t)vio.ppa_list, vio.nppas, + &vio.result, &vio.status, 0); + + if (ret && copy_to_user(uvio, &vio, sizeof(vio))) + return -EFAULT; + + return ret; +} + +static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin, + struct nvm_passthru_vio __user *uvcmd) +{ + struct nvm_passthru_vio vcmd; + struct nvme_nvm_command c; + struct request_queue *q; + unsigned int timeout = 0; + int ret; + + if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd))) + return -EFAULT; + if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN))) + return -EACCES; + if (vcmd.flags) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.common.opcode = vcmd.opcode; + c.common.nsid = cpu_to_le32(ns->ns_id); + c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3); + /* cdw11-12 */ + c.ph_rw.length = cpu_to_le16(vcmd.nppas); + c.ph_rw.control = cpu_to_le32(vcmd.control); + c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13); + c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14); + c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15); 
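/*
 * A hypothetical user-space caller of the new ioctls, built only from
 * the fields handled here (uapi/linux/lightnvm.h is the authoritative
 * layout; the opcode value below is made up):
 *
 *   struct nvm_user_vio vio = { 0 };
 *   vio.opcode   = 0x92;                      // vendor read opcode
 *   vio.nppas    = nr_ppas - 1;               // zero-based count
 *   vio.ppa_list = (__u64)(uintptr_t)ppas;
 *   vio.addr     = (__u64)(uintptr_t)buf;
 *   ret = ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 *
 * Note the zero-based conventions: the kernel side copies
 * sizeof(u64) * (ppa_len + 1) PPA entries and maps
 * (vio.nppas + 1) << ns->lba_shift data bytes. The vcmd passthrough
 * paths require CAP_SYS_ADMIN for any opcode other than 0xF2.
 */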
+ + if (vcmd.timeout_ms) + timeout = msecs_to_jiffies(vcmd.timeout_ms); + + q = admin ? ns->ctrl->admin_q : ns->queue; + + ret = nvme_nvm_submit_user_cmd(q, ns, + (struct nvme_nvm_command *)&c, + (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len, + (void __user *)(uintptr_t)vcmd.metadata, + vcmd.metadata_len, + (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas, + &vcmd.result, &vcmd.status, timeout); + + if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd))) + return -EFAULT; + + return ret; +} + +int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case NVME_NVM_IOCTL_ADMIN_VIO: + return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg); + case NVME_NVM_IOCTL_IO_VIO: + return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg); + case NVME_NVM_IOCTL_SUBMIT_VIO: + return nvme_nvm_submit_vio(ns, (void __user *)arg); + default: + return -ENOTTY; + } +} + int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) { struct request_queue *q = ns->queue; @@ -622,7 +840,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev, return 0; id = &ndev->identity; - grp = &id->groups[0]; + grp = &id->grp; attr = &dattr->attr; if (strcmp(attr->name, "version") == 0) { @@ -633,10 +851,9 @@ static ssize_t nvm_dev_attr_show(struct device *dev, return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); } else if (strcmp(attr->name, "device_mode") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); + /* kept for compatibility */ } else if (strcmp(attr->name, "media_manager") == 0) { - if (!ndev->mt) - return scnprintf(page, PAGE_SIZE, "%s\n", "none"); - return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name); + return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm"); } else if (strcmp(attr->name, "ppa_format") == 0) { return scnprintf(page, PAGE_SIZE, "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index aead6d08ed2c..14cfc6f7facb 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -19,6 +19,7 @@ #include <linux/kref.h> #include <linux/blk-mq.h> #include <linux/lightnvm.h> +#include <linux/sed-opal.h> enum { /* @@ -125,6 +126,8 @@ struct nvme_ctrl { struct list_head node; struct ida ns_ida; + struct opal_dev *opal_dev; + char name[12]; char serial[20]; char model[40]; @@ -137,6 +140,7 @@ struct nvme_ctrl { u32 max_hw_sectors; u16 oncs; u16 vid; + u16 oacs; atomic_t abort_limit; u8 event_limit; u8 vwc; @@ -267,6 +271,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl); void nvme_queue_scan(struct nvme_ctrl *ctrl); void nvme_remove_namespaces(struct nvme_ctrl *ctrl); +int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, + bool send); + #define NVME_NR_AERS 1 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, union nvme_result *res); @@ -318,6 +325,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); int nvme_nvm_register_sysfs(struct nvme_ns *ns); void nvme_nvm_unregister_sysfs(struct nvme_ns *ns); +int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg); #else static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) @@ -335,6 +343,11 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i { return 0; } +static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, + unsigned long arg) +{ + return -ENOTTY; +} #endif /* CONFIG_NVM */ static inline struct nvme_ns 
*nvme_get_ns_from_dev(struct device *dev) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3faefabf339c..ddc51adb594d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -43,6 +43,7 @@ #include <linux/types.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <asm/unaligned.h> +#include <linux/sed-opal.h> #include "nvme.h" @@ -588,7 +589,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, */ if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && - req->cmd_type != REQ_TYPE_DRV_PRIV) { + !blk_rq_is_passthrough(req)) { blk_mq_end_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } @@ -645,7 +646,7 @@ static void nvme_complete_rq(struct request *req) return; } - if (req->cmd_type == REQ_TYPE_DRV_PRIV) + if (blk_rq_is_passthrough(req)) error = req->errors; else error = nvme_error_status(req->errors); @@ -895,12 +896,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) return BLK_EH_HANDLED; } - iod->aborted = 1; - if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } + iod->aborted = 1; memset(&cmd, 0, sizeof(cmd)); cmd.abort.opcode = nvme_admin_abort_cmd; @@ -1178,6 +1178,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->admin_tagset.timeout = ADMIN_TIMEOUT; dev->admin_tagset.numa_node = dev_to_node(dev->dev); dev->admin_tagset.cmd_size = nvme_cmd_size(dev); + dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; dev->admin_tagset.driver_data = dev; if (blk_mq_alloc_tag_set(&dev->admin_tagset)) @@ -1738,6 +1739,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) if (dev->ctrl.admin_q) blk_put_queue(dev->ctrl.admin_q); kfree(dev->queues); + kfree(dev->ctrl.opal_dev); kfree(dev); } @@ -1754,6 +1756,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) static void nvme_reset_work(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); + bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); int result = -ENODEV; if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) @@ -1786,6 +1789,14 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) { + dev->ctrl.opal_dev = + init_opal_dev(&dev->ctrl, &nvme_sec_submit); + } + + if (was_suspend) + opal_unlock_from_suspend(dev->ctrl.opal_dev); + result = nvme_setup_io_queues(dev); if (result) goto out; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 557f29b1f1bb..a75e95d42b3f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1423,7 +1423,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { struct nvme_command *cmd = nvme_req(rq)->cmd; - if (rq->cmd_type != REQ_TYPE_DRV_PRIV || + if (!blk_rq_is_passthrough(rq) || cmd->common.opcode != nvme_fabrics_command || cmd->fabrics.fctype != nvme_fabrics_type_connect) return false; @@ -1471,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); - if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) + if (req_op(rq) == REQ_OP_FLUSH) flush = true; ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, req->mr->need_inval ? 
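/*
 * The recurring conversion in this series (fc, rdma, loop, pci, and the
 * SCSI drivers further down): with the cmd_type field gone, passthrough
 * is a property of the request operation. A sketch of the block-layer
 * helper, assuming the definitions this series introduces:
 *
 *   static inline bool blk_rq_is_passthrough(struct request *rq)
 *   {
 *           return req_op(rq) == REQ_OP_SCSI_IN ||
 *                  req_op(rq) == REQ_OP_SCSI_OUT ||
 *                  req_op(rq) == REQ_OP_DRV_IN ||
 *                  req_op(rq) == REQ_OP_DRV_OUT;
 *   }
 *
 * so completion paths test the helper instead of REQ_TYPE_DRV_PRIV.
 */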
&req->reg_wr.wr : NULL, flush); @@ -1522,7 +1522,7 @@ static void nvme_rdma_complete_rq(struct request *rq) return; } - if (rq->cmd_type == REQ_TYPE_DRV_PRIV) + if (blk_rq_is_passthrough(rq)) error = rq->errors; else error = nvme_error_status(rq->errors); diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index a5c09e703bd8..f49ae2758bb7 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c @@ -43,6 +43,7 @@ #include <asm/unaligned.h> #include <scsi/sg.h> #include <scsi/scsi.h> +#include <scsi/scsi_request.h> #include "nvme.h" @@ -2347,12 +2348,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) { - u8 cmd[BLK_MAX_CDB]; + u8 cmd[16]; int retcode; unsigned int opcode; if (hdr->cmdp == NULL) return -EMSGSIZE; + if (hdr->cmd_len > sizeof(cmd)) + return -EINVAL; if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) return -EFAULT; @@ -2451,8 +2454,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) return -EFAULT; if (hdr.interface_id != 'S') return -EINVAL; - if (hdr.cmd_len > BLK_MAX_CDB) - return -EINVAL; /* * A positive return code means a NVMe status, which has been diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 9aaa70071ae5..f3862e38f574 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req) return; } - if (req->cmd_type == REQ_TYPE_DRV_PRIV) + if (blk_rq_is_passthrough(req)) error = req->errors; else error = nvme_error_status(req->errors); diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 9f16ea6964ec..152de6817875 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -300,13 +300,6 @@ static void scm_blk_request(struct request_queue *rq) struct request *req; while ((req = blk_peek_request(rq))) { - if (req->cmd_type != REQ_TYPE_FS) { - blk_start_request(req); - blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); - __blk_end_request_all(req, -EIO); - continue; - } - if (!scm_permit_request(bdev, req)) goto out; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index a4f6b0d95515..d4023bf1e739 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -18,6 +18,7 @@ config SCSI depends on BLOCK select SCSI_DMA if HAS_DMA select SG_POOL + select BLK_SCSI_REQUEST ---help--- If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or any other SCSI device under Linux, say Y and make sure that you know diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 5b80746980b8..4a7679f6c73d 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -88,12 +88,6 @@ struct clariion_dh_data { */ unsigned char buffer[CLARIION_BUFFER_SIZE]; /* - * SCSI sense buffer for commands -- assumes serial issuance - * and completion sequence of all commands for same multipath. - */ - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; - unsigned int senselen; - /* * LUN state */ int lun_state; @@ -116,44 +110,38 @@ struct clariion_dh_data { /* * Parse MODE_SELECT cmd reply. 
*/ -static int trespass_endio(struct scsi_device *sdev, char *sense) +static int trespass_endio(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) { int err = SCSI_DH_IO; - struct scsi_sense_hdr sshdr; - - if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { - sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " - "0x%2x, 0x%2x while sending CLARiiON trespass " - "command.\n", CLARIION_NAME, sshdr.sense_key, - sshdr.asc, sshdr.ascq); - if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && - (sshdr.ascq == 0x00)) { - /* - * Array based copy in progress -- do not send - * mode_select or copy will be aborted mid-stream. - */ - sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " - "progress while sending CLARiiON trespass " - "command.\n", CLARIION_NAME); - err = SCSI_DH_DEV_TEMP_BUSY; - } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && - (sshdr.ascq == 0x03)) { - /* - * LUN Not Ready - Manual Intervention Required - * indicates in-progress ucode upgrade (NDU). - */ - sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " - "ucode upgrade NDU operation while sending " - "CLARiiON trespass command.\n", CLARIION_NAME); - err = SCSI_DH_DEV_TEMP_BUSY; - } else - err = SCSI_DH_DEV_FAILED; - } else { - sdev_printk(KERN_INFO, sdev, - "%s: failed to send MODE SELECT, no sense available\n", - CLARIION_NAME); - } + sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " + "0x%2x, 0x%2x while sending CLARiiON trespass " + "command.\n", CLARIION_NAME, sshdr->sense_key, + sshdr->asc, sshdr->ascq); + + if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 && + sshdr->ascq == 0x00) { + /* + * Array based copy in progress -- do not send + * mode_select or copy will be aborted mid-stream. + */ + sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " + "progress while sending CLARiiON trespass " + "command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 && + sshdr->ascq == 0x03) { + /* + * LUN Not Ready - Manual Intervention Required + * indicates in-progress ucode upgrade (NDU). + */ + sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " + "ucode upgrade NDU operation while sending " + "CLARiiON trespass command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else + err = SCSI_DH_DEV_FAILED; return err; } @@ -257,103 +245,15 @@ out: return sp_model; } -/* - * Get block request for REQ_BLOCK_PC command issued to path. Currently - * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands. - * - * Uses data and sense buffers in hardware handler context structure and - * assumes serial servicing of commands, both issuance and completion. - */ -static struct request *get_req(struct scsi_device *sdev, int cmd, - unsigned char *buffer) -{ - struct request *rq; - int len = 0; - - rq = blk_get_request(sdev->request_queue, - (cmd != INQUIRY) ? 
WRITE : READ, GFP_NOIO); - if (IS_ERR(rq)) { - sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); - return NULL; - } - - blk_rq_set_block_pc(rq); - rq->cmd_len = COMMAND_SIZE(cmd); - rq->cmd[0] = cmd; - - switch (cmd) { - case MODE_SELECT: - len = sizeof(short_trespass); - rq->cmd[1] = 0x10; - rq->cmd[4] = len; - break; - case MODE_SELECT_10: - len = sizeof(long_trespass); - rq->cmd[1] = 0x10; - rq->cmd[8] = len; - break; - case INQUIRY: - len = CLARIION_BUFFER_SIZE; - rq->cmd[4] = len; - memset(buffer, 0, len); - break; - default: - BUG_ON(1); - break; - } - - rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | - REQ_FAILFAST_DRIVER; - rq->timeout = CLARIION_TIMEOUT; - rq->retries = CLARIION_RETRIES; - - if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) { - blk_put_request(rq); - return NULL; - } - - return rq; -} - -static int send_inquiry_cmd(struct scsi_device *sdev, int page, - struct clariion_dh_data *csdev) -{ - struct request *rq = get_req(sdev, INQUIRY, csdev->buffer); - int err; - - if (!rq) - return SCSI_DH_RES_TEMP_UNAVAIL; - - rq->sense = csdev->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = csdev->senselen = 0; - - rq->cmd[0] = INQUIRY; - if (page != 0) { - rq->cmd[1] = 1; - rq->cmd[2] = page; - } - err = blk_execute_rq(sdev->request_queue, NULL, rq, 1); - if (err == -EIO) { - sdev_printk(KERN_INFO, sdev, - "%s: failed to send %s INQUIRY: %x\n", - CLARIION_NAME, page?"EVPD":"standard", - rq->errors); - csdev->senselen = rq->sense_len; - err = SCSI_DH_IO; - } - - blk_put_request(rq); - - return err; -} - static int send_trespass_cmd(struct scsi_device *sdev, struct clariion_dh_data *csdev) { - struct request *rq; unsigned char *page22; - int err, len, cmd; + unsigned char cdb[COMMAND_SIZE(MODE_SELECT)]; + int err, res = SCSI_DH_OK, len; + struct scsi_sense_hdr sshdr; + u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; if (csdev->flags & CLARIION_SHORT_TRESPASS) { page22 = short_trespass; @@ -361,40 +261,37 @@ static int send_trespass_cmd(struct scsi_device *sdev, /* Set Honor Reservations bit */ page22[6] |= 0x80; len = sizeof(short_trespass); - cmd = MODE_SELECT; + cdb[0] = MODE_SELECT; + cdb[1] = 0x10; + cdb[4] = len; } else { page22 = long_trespass; if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) /* Set Honor Reservations bit */ page22[10] |= 0x80; len = sizeof(long_trespass); - cmd = MODE_SELECT_10; + cdb[0] = MODE_SELECT_10; + cdb[8] = len; } BUG_ON((len > CLARIION_BUFFER_SIZE)); memcpy(csdev->buffer, page22, len); - rq = get_req(sdev, cmd, csdev->buffer); - if (!rq) - return SCSI_DH_RES_TEMP_UNAVAIL; - - rq->sense = csdev->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = csdev->senselen = 0; - - err = blk_execute_rq(sdev->request_queue, NULL, rq, 1); - if (err == -EIO) { - if (rq->sense_len) { - err = trespass_endio(sdev, csdev->sense); - } else { + err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, + csdev->buffer, len, &sshdr, + CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, + NULL, req_flags, 0); + if (err) { + if (scsi_sense_valid(&sshdr)) + res = trespass_endio(sdev, &sshdr); + else { sdev_printk(KERN_INFO, sdev, "%s: failed to send MODE SELECT: %x\n", - CLARIION_NAME, rq->errors); + CLARIION_NAME, err); + res = SCSI_DH_IO; } } - blk_put_request(rq); - - return err; + return res; } static int clariion_check_sense(struct scsi_device *sdev, @@ -464,21 +361,7 @@ static int clariion_std_inquiry(struct scsi_device *sdev, int err; char *sp_model; - err = 
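/*
 * All three device handlers converted in this series follow one recipe:
 * drop the hand-built BLOCK_PC request (blk_get_request(), rq->cmd[],
 * private sense buffer) in favor of the synchronous helper, as the
 * send_trespass_cmd() rework above shows. Schematically:
 *
 *   struct scsi_sense_hdr sshdr;
 *   int res = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, buf, len,
 *                                    &sshdr, TIMEOUT * HZ, RETRIES,
 *                                    NULL, req_flags, 0);
 *   if (res && scsi_sense_valid(&sshdr))
 *           ...decode sshdr.sense_key / asc / ascq...
 *
 * The midlayer fills the sense header itself, so the per-handler sense[]
 * arrays and senselen bookkeeping are deleted outright.
 */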
send_inquiry_cmd(sdev, 0, csdev); - if (err != SCSI_DH_OK && csdev->senselen) { - struct scsi_sense_hdr sshdr; - - if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE, - &sshdr)) { - sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code " - "%02x/%02x/%02x\n", CLARIION_NAME, - sshdr.sense_key, sshdr.asc, sshdr.ascq); - } - err = SCSI_DH_IO; - goto out; - } - - sp_model = parse_sp_model(sdev, csdev->buffer); + sp_model = parse_sp_model(sdev, sdev->inquiry); if (!sp_model) { err = SCSI_DH_DEV_UNSUPP; goto out; @@ -500,30 +383,12 @@ out: static int clariion_send_inquiry(struct scsi_device *sdev, struct clariion_dh_data *csdev) { - int err, retry = CLARIION_RETRIES; - -retry: - err = send_inquiry_cmd(sdev, 0xC0, csdev); - if (err != SCSI_DH_OK && csdev->senselen) { - struct scsi_sense_hdr sshdr; - - err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE, - &sshdr); - if (!err) - return SCSI_DH_IO; - - err = clariion_check_sense(sdev, &sshdr); - if (retry > 0 && err == ADD_TO_MLQUEUE) { - retry--; - goto retry; - } - sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code " - "%02x/%02x/%02x\n", CLARIION_NAME, - sshdr.sense_key, sshdr.asc, sshdr.ascq); - err = SCSI_DH_IO; - } else { + int err = SCSI_DH_IO; + + if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer, + CLARIION_BUFFER_SIZE)) err = parse_sp_info_reply(sdev, csdev); - } + return err; } diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 308e87195dc1..be43c940636d 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -38,13 +38,10 @@ #define HP_SW_PATH_PASSIVE 1 struct hp_sw_dh_data { - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int path_state; int retries; int retry_cnt; struct scsi_device *sdev; - activate_complete callback_fn; - void *callback_data; }; static int hp_sw_start_stop(struct hp_sw_dh_data *); @@ -56,43 +53,34 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *); * * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path */ -static int tur_done(struct scsi_device *sdev, unsigned char *sense) +static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, + struct scsi_sense_hdr *sshdr) { - struct scsi_sense_hdr sshdr; - int ret; + int ret = SCSI_DH_IO; - ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (!ret) { - sdev_printk(KERN_WARNING, sdev, - "%s: sending tur failed, no sense available\n", - HP_SW_NAME); - ret = SCSI_DH_IO; - goto done; - } - switch (sshdr.sense_key) { + switch (sshdr->sense_key) { case UNIT_ATTENTION: ret = SCSI_DH_IMM_RETRY; break; case NOT_READY: - if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) { + if (sshdr->asc == 0x04 && sshdr->ascq == 2) { /* * LUN not ready - Initialization command required * * This is the passive path */ - ret = SCSI_DH_DEV_OFFLINED; + h->path_state = HP_SW_PATH_PASSIVE; + ret = SCSI_DH_OK; break; } /* Fallthrough */ default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", - HP_SW_NAME, sshdr.sense_key, sshdr.asc, - sshdr.ascq); + HP_SW_NAME, sshdr->sense_key, sshdr->asc, + sshdr->ascq); break; } - -done: return ret; } @@ -105,131 +93,36 @@ done: */ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) { - struct request *req; - int ret; + unsigned char cmd[6] = { TEST_UNIT_READY }; + struct scsi_sense_hdr sshdr; + int ret = SCSI_DH_OK, res; + u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; retry: - req = 
blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); - if (IS_ERR(req)) - return SCSI_DH_RES_TEMP_UNAVAIL; - - blk_rq_set_block_pc(req); - req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | - REQ_FAILFAST_DRIVER; - req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); - req->cmd[0] = TEST_UNIT_READY; - req->timeout = HP_SW_TIMEOUT; - req->sense = h->sense; - memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); - req->sense_len = 0; - - ret = blk_execute_rq(req->q, NULL, req, 1); - if (ret == -EIO) { - if (req->sense_len > 0) { - ret = tur_done(sdev, h->sense); - } else { + res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, + NULL, req_flags, 0); + if (res) { + if (scsi_sense_valid(&sshdr)) + ret = tur_done(sdev, h, &sshdr); + else { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed with %x\n", - HP_SW_NAME, req->errors); + HP_SW_NAME, res); ret = SCSI_DH_IO; } } else { h->path_state = HP_SW_PATH_ACTIVE; ret = SCSI_DH_OK; } - if (ret == SCSI_DH_IMM_RETRY) { - blk_put_request(req); + if (ret == SCSI_DH_IMM_RETRY) goto retry; - } - if (ret == SCSI_DH_DEV_OFFLINED) { - h->path_state = HP_SW_PATH_PASSIVE; - ret = SCSI_DH_OK; - } - - blk_put_request(req); return ret; } /* - * start_done - Handle START STOP UNIT return status - * @sdev: sdev the command has been sent to - * @errors: blk error code - */ -static int start_done(struct scsi_device *sdev, unsigned char *sense) -{ - struct scsi_sense_hdr sshdr; - int rc; - - rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (!rc) { - sdev_printk(KERN_WARNING, sdev, - "%s: sending start_stop_unit failed, " - "no sense available\n", - HP_SW_NAME); - return SCSI_DH_IO; - } - switch (sshdr.sense_key) { - case NOT_READY: - if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { - /* - * LUN not ready - manual intervention required - * - * Switch-over in progress, retry. 
- */ - rc = SCSI_DH_RETRY; - break; - } - /* fall through */ - default: - sdev_printk(KERN_WARNING, sdev, - "%s: sending start_stop_unit failed, sense %x/%x/%x\n", - HP_SW_NAME, sshdr.sense_key, sshdr.asc, - sshdr.ascq); - rc = SCSI_DH_IO; - } - - return rc; -} - -static void start_stop_endio(struct request *req, int error) -{ - struct hp_sw_dh_data *h = req->end_io_data; - unsigned err = SCSI_DH_OK; - - if (error || host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != COMMAND_COMPLETE) { - sdev_printk(KERN_WARNING, h->sdev, - "%s: sending start_stop_unit failed with %x\n", - HP_SW_NAME, req->errors); - err = SCSI_DH_IO; - goto done; - } - - if (req->sense_len > 0) { - err = start_done(h->sdev, h->sense); - if (err == SCSI_DH_RETRY) { - err = SCSI_DH_IO; - if (--h->retry_cnt) { - blk_put_request(req); - err = hp_sw_start_stop(h); - if (err == SCSI_DH_OK) - return; - } - } - } -done: - req->end_io_data = NULL; - __blk_put_request(req->q, req); - if (h->callback_fn) { - h->callback_fn(h->callback_data, err); - h->callback_fn = h->callback_data = NULL; - } - return; - -} - -/* * hp_sw_start_stop - Send START STOP UNIT command * @sdev: sdev command should be sent to * @@ -237,26 +130,48 @@ done: */ static int hp_sw_start_stop(struct hp_sw_dh_data *h) { - struct request *req; - - req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); - if (IS_ERR(req)) - return SCSI_DH_RES_TEMP_UNAVAIL; - - blk_rq_set_block_pc(req); - req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | - REQ_FAILFAST_DRIVER; - req->cmd_len = COMMAND_SIZE(START_STOP); - req->cmd[0] = START_STOP; - req->cmd[4] = 1; /* Start spin cycle */ - req->timeout = HP_SW_TIMEOUT; - req->sense = h->sense; - memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); - req->sense_len = 0; - req->end_io_data = h; + unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdev = h->sdev; + int res, rc = SCSI_DH_OK; + int retry_cnt = HP_SW_RETRIES; + u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; - blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); - return SCSI_DH_OK; +retry: + res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, + NULL, req_flags, 0); + if (res) { + if (!scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "no sense available\n", HP_SW_NAME); + return SCSI_DH_IO; + } + switch (sshdr.sense_key) { + case NOT_READY: + if (sshdr.asc == 0x04 && sshdr.ascq == 3) { + /* + * LUN not ready - manual intervention required + * + * Switch-over in progress, retry. 
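 *
 * Context for this hunk: the old asynchronous start_stop_endio()
 * completion is folded into a synchronous loop here, so this case
 * simply decrements retry_cnt and jumps back to the retry label
 * below, and the callback_fn/callback_data plumbing that
 * hp_sw_dh_data carried for the async path is deleted.
 *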
+ */ + if (--retry_cnt) + goto retry; + rc = SCSI_DH_RETRY; + break; + } + /* fall through */ + default: + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "sense %x/%x/%x\n", HP_SW_NAME, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + rc = SCSI_DH_IO; + } + } + return rc; } static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) @@ -290,15 +205,8 @@ static int hp_sw_activate(struct scsi_device *sdev, ret = hp_sw_tur(sdev, h); - if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { - h->retry_cnt = h->retries; - h->callback_fn = fn; - h->callback_data = data; + if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) ret = hp_sw_start_stop(h); - if (ret == SCSI_DH_OK) - return 0; - h->callback_fn = h->callback_data = NULL; - } if (fn) fn(data, ret); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 00d9c326158e..b64eaae8533d 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -205,7 +205,6 @@ struct rdac_dh_data { #define RDAC_NON_PREFERRED 1 char preferred; - unsigned char sense[SCSI_SENSE_BUFFERSIZE]; union { struct c2_inquiry c2; struct c4_inquiry c4; @@ -262,40 +261,12 @@ do { \ sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ } while (0); -static struct request *get_rdac_req(struct scsi_device *sdev, - void *buffer, unsigned buflen, int rw) +static unsigned int rdac_failover_get(struct rdac_controller *ctlr, + struct list_head *list, + unsigned char *cdb) { - struct request *rq; - struct request_queue *q = sdev->request_queue; - - rq = blk_get_request(q, rw, GFP_NOIO); - - if (IS_ERR(rq)) { - sdev_printk(KERN_INFO, sdev, - "get_rdac_req: blk_get_request failed.\n"); - return NULL; - } - blk_rq_set_block_pc(rq); - - if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { - blk_put_request(rq); - sdev_printk(KERN_INFO, sdev, - "get_rdac_req: blk_rq_map_kern failed.\n"); - return NULL; - } - - rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | - REQ_FAILFAST_DRIVER; - rq->retries = RDAC_RETRIES; - rq->timeout = RDAC_TIMEOUT; - - return rq; -} - -static struct request *rdac_failover_get(struct scsi_device *sdev, - struct rdac_dh_data *h, struct list_head *list) -{ - struct request *rq; + struct scsi_device *sdev = ctlr->ms_sdev; + struct rdac_dh_data *h = sdev->handler_data; struct rdac_mode_common *common; unsigned data_size; struct rdac_queue_data *qdata; @@ -332,27 +303,17 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, lun_table[qdata->h->lun] = 0x81; } - /* get request for block layer packet command */ - rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE); - if (!rq) - return NULL; - /* Prepare the command. 
*/ if (h->ctlr->use_ms10) { - rq->cmd[0] = MODE_SELECT_10; - rq->cmd[7] = data_size >> 8; - rq->cmd[8] = data_size & 0xff; + cdb[0] = MODE_SELECT_10; + cdb[7] = data_size >> 8; + cdb[8] = data_size & 0xff; } else { - rq->cmd[0] = MODE_SELECT; - rq->cmd[4] = data_size; + cdb[0] = MODE_SELECT; + cdb[4] = data_size; } - rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = 0; - return rq; + return data_size; } static void release_controller(struct kref *kref) @@ -400,46 +361,14 @@ static struct rdac_controller *get_controller(int index, char *array_name, return ctlr; } -static int submit_inquiry(struct scsi_device *sdev, int page_code, - unsigned int len, struct rdac_dh_data *h) -{ - struct request *rq; - struct request_queue *q = sdev->request_queue; - int err = SCSI_DH_RES_TEMP_UNAVAIL; - - rq = get_rdac_req(sdev, &h->inq, len, READ); - if (!rq) - goto done; - - /* Prepare the command. */ - rq->cmd[0] = INQUIRY; - rq->cmd[1] = 1; - rq->cmd[2] = page_code; - rq->cmd[4] = len; - rq->cmd_len = COMMAND_SIZE(INQUIRY); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = 0; - - err = blk_execute_rq(q, NULL, rq, 1); - if (err == -EIO) - err = SCSI_DH_IO; - - blk_put_request(rq); -done: - return err; -} - static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { - int err, i; - struct c8_inquiry *inqp; + int err = SCSI_DH_IO, i; + struct c8_inquiry *inqp = &h->inq.c8; - err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h); - if (err == SCSI_DH_OK) { - inqp = &h->inq.c8; + if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp, + sizeof(struct c8_inquiry))) { if (inqp->page_code != 0xc8) return SCSI_DH_NOSYS; if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || @@ -453,20 +382,20 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, *(array_name+ARRAY_LABEL_LEN-1) = '\0'; memset(array_id, 0, UNIQUE_ID_LEN); memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); + err = SCSI_DH_OK; } return err; } static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) { - int err, access_state; + int err = SCSI_DH_IO, access_state; struct rdac_dh_data *tmp; - struct c9_inquiry *inqp; + struct c9_inquiry *inqp = &h->inq.c9; h->state = RDAC_STATE_ACTIVE; - err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); - if (err == SCSI_DH_OK) { - inqp = &h->inq.c9; + if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp, + sizeof(struct c9_inquiry))) { /* detect the operating mode */ if ((inqp->avte_cvp >> 5) & 0x1) h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ @@ -501,6 +430,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) tmp->sdev->access_state = access_state; } rcu_read_unlock(); + err = SCSI_DH_OK; } return err; @@ -509,12 +439,11 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) static int initialize_controller(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { - int err, index; - struct c4_inquiry *inqp; + int err = SCSI_DH_IO, index; + struct c4_inquiry *inqp = &h->inq.c4; - err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); - if (err == SCSI_DH_OK) { - inqp = &h->inq.c4; + if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp, + sizeof(struct c4_inquiry))) { /* get the controller index */ if (inqp->slot_id[1] == 0x31) index = 0; @@ -530,18 +459,18 @@ 
static int initialize_controller(struct scsi_device *sdev, h->sdev = sdev; } spin_unlock(&list_lock); + err = SCSI_DH_OK; } return err; } static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) { - int err; - struct c2_inquiry *inqp; + int err = SCSI_DH_IO; + struct c2_inquiry *inqp = &h->inq.c2; - err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h); - if (err == SCSI_DH_OK) { - inqp = &h->inq.c2; + if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp, + sizeof(struct c2_inquiry))) { /* * If more than MODE6_MAX_LUN luns are supported, use * mode select 10 @@ -550,36 +479,35 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) h->ctlr->use_ms10 = 1; else h->ctlr->use_ms10 = 0; + err = SCSI_DH_OK; } return err; } static int mode_select_handle_sense(struct scsi_device *sdev, - unsigned char *sensebuf) + struct scsi_sense_hdr *sense_hdr) { - struct scsi_sense_hdr sense_hdr; - int err = SCSI_DH_IO, ret; + int err = SCSI_DH_IO; struct rdac_dh_data *h = sdev->handler_data; - ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); - if (!ret) + if (!scsi_sense_valid(sense_hdr)) goto done; - switch (sense_hdr.sense_key) { + switch (sense_hdr->sense_key) { case NO_SENSE: case ABORTED_COMMAND: case UNIT_ATTENTION: err = SCSI_DH_RETRY; break; case NOT_READY: - if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01) + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) /* LUN Not Ready and is in the Process of Becoming * Ready */ err = SCSI_DH_RETRY; break; case ILLEGAL_REQUEST: - if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36) + if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36) /* * Command Lock contention */ @@ -592,7 +520,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "MODE_SELECT returned with sense %02x/%02x/%02x", (char *) h->ctlr->array_name, h->ctlr->index, - sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); done: return err; @@ -602,13 +530,16 @@ static void send_mode_select(struct work_struct *work) { struct rdac_controller *ctlr = container_of(work, struct rdac_controller, ms_work); - struct request *rq; struct scsi_device *sdev = ctlr->ms_sdev; struct rdac_dh_data *h = sdev->handler_data; - struct request_queue *q = sdev->request_queue; - int err, retry_cnt = RDAC_RETRY_COUNT; + int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT; struct rdac_queue_data *tmp, *qdata; LIST_HEAD(list); + unsigned char cdb[COMMAND_SIZE(MODE_SELECT_10)]; + struct scsi_sense_hdr sshdr; + unsigned int data_size; + u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; spin_lock(&ctlr->ms_lock); list_splice_init(&ctlr->ms_head, &list); @@ -616,21 +547,19 @@ static void send_mode_select(struct work_struct *work) ctlr->ms_sdev = NULL; spin_unlock(&ctlr->ms_lock); -retry: - err = SCSI_DH_RES_TEMP_UNAVAIL; - rq = rdac_failover_get(sdev, h, &list); - if (!rq) - goto done; + retry: + data_size = rdac_failover_get(ctlr, &list, cdb); RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); - err = blk_execute_rq(q, NULL, rq, 1); - blk_put_request(rq); - if (err != SCSI_DH_OK) { - err = mode_select_handle_sense(sdev, h->sense); + if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, + &h->ctlr->mode_select, data_size, &sshdr, + RDAC_TIMEOUT * HZ, + RDAC_RETRIES, NULL, req_flags, 0)) { + err = mode_select_handle_sense(sdev, &sshdr); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; if (err == SCSI_DH_IMM_RETRY) @@ -643,7 +572,6 @@ retry: (char *) h->ctlr->array_name, h->ctlr->index); } -done: list_for_each_entry_safe(qdata, tmp, &list, entry) { list_del(&qdata->entry); if (err == SCSI_DH_OK) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 258a3f9a2519..831a1c8b9f89 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -213,6 +213,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, goto fail; } + error = scsi_init_sense_cache(shost); + if (error) + goto fail; + if (shost_use_blk_mq(shost)) { error = scsi_mq_setup_tags(shost); if (error) @@ -226,19 +230,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, } } - /* - * Note that we allocate the freelist even for the MQ case for now, - * as we need a command set aside for scsi_reset_provider. Having - * the full host freelist and one command available for that is a - * little heavy-handed, but avoids introducing a special allocator - * just for this. Eventually the structure of scsi_reset_provider - * will need a major overhaul. - */ - error = scsi_setup_command_freelist(shost); - if (error) - goto out_destroy_tags; - - if (!shost->shost_gendev.parent) shost->shost_gendev.parent = dev ? dev : &platform_bus; if (!dma_dev) @@ -258,7 +249,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, error = device_add(&shost->shost_gendev); if (error) - goto out_destroy_freelist; + goto out_disable_runtime_pm; scsi_host_set_state(shost, SHOST_RUNNING); get_device(shost->shost_gendev.parent); @@ -308,13 +299,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, device_del(&shost->shost_dev); out_del_gendev: device_del(&shost->shost_gendev); - out_destroy_freelist: + out_disable_runtime_pm: device_disable_async_suspend(&shost->shost_gendev); pm_runtime_disable(&shost->shost_gendev); pm_runtime_set_suspended(&shost->shost_gendev); pm_runtime_put_noidle(&shost->shost_gendev); - scsi_destroy_command_freelist(shost); - out_destroy_tags: if (shost_use_blk_mq(shost)) scsi_mq_destroy_tags(shost); fail: @@ -355,7 +344,6 @@ static void scsi_host_dev_release(struct device *dev) kfree(dev_name(&shost->shost_dev)); } - scsi_destroy_command_freelist(shost); if (shost_use_blk_mq(shost)) { if (shost->tag_set.tags) scsi_mq_destroy_tags(shost); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index cbc0c5fe5a60..c611412a8de9 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5539,8 +5539,8 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) * Retries always go down the normal I/O path. 
*/ if (likely(cmd->retries == 0 && - cmd->request->cmd_type == REQ_TYPE_FS && - h->acciopath_status)) { + !blk_rq_is_passthrough(cmd->request) && + h->acciopath_status)) { rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); if (rc == 0) return 0; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 919736a74ffa..aa76f36abe03 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -2095,7 +2095,7 @@ int fc_lport_bsg_request(struct bsg_job *job) bsg_reply->reply_payload_rcv_len = 0; if (rsp) - rsp->resid_len = job->reply_payload.payload_len; + scsi_req(rsp)->resid_len = job->reply_payload.payload_len; mutex_lock(&lport->lp_mutex); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 022bb6e10d98..570b2cb2da43 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -2174,12 +2174,12 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, bio_data(rsp->bio), blk_rq_bytes(rsp)); if (ret > 0) { /* positive number is the untransferred residual */ - rsp->resid_len = ret; - req->resid_len = 0; + scsi_req(rsp)->resid_len = ret; + scsi_req(req)->resid_len = 0; ret = 0; } else if (ret == 0) { - rsp->resid_len = 0; - req->resid_len = 0; + scsi_req(rsp)->resid_len = 0; + scsi_req(req)->resid_len = 0; } return ret; diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c index d24792575169..45cbbc44f4d7 100644 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -274,15 +274,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, switch (req_data[1]) { case SMP_REPORT_GENERAL: - req->resid_len -= 8; - rsp->resid_len -= 32; + scsi_req(req)->resid_len -= 8; + scsi_req(rsp)->resid_len -= 32; resp_data[2] = SMP_RESP_FUNC_ACC; resp_data[9] = sas_ha->num_phys; break; case SMP_REPORT_MANUF_INFO: - req->resid_len -= 8; - rsp->resid_len -= 64; + scsi_req(req)->resid_len -= 8; + scsi_req(rsp)->resid_len -= 64; resp_data[2] = SMP_RESP_FUNC_ACC; memcpy(resp_data + 12, shost->hostt->name, SAS_EXPANDER_VENDOR_ID_LEN); @@ -295,13 +295,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_DISCOVER: - req->resid_len -= 16; - if ((int)req->resid_len < 0) { - req->resid_len = 0; + scsi_req(req)->resid_len -= 16; + if ((int)scsi_req(req)->resid_len < 0) { + scsi_req(req)->resid_len = 0; error = -EINVAL; goto out; } - rsp->resid_len -= 56; + scsi_req(rsp)->resid_len -= 56; sas_host_smp_discover(sas_ha, resp_data, req_data[9]); break; @@ -311,13 +311,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_REPORT_PHY_SATA: - req->resid_len -= 16; - if ((int)req->resid_len < 0) { - req->resid_len = 0; + scsi_req(req)->resid_len -= 16; + if ((int)scsi_req(req)->resid_len < 0) { + scsi_req(req)->resid_len = 0; error = -EINVAL; goto out; } - rsp->resid_len -= 60; + scsi_req(rsp)->resid_len -= 60; sas_report_phy_sata(sas_ha, resp_data, req_data[9]); break; @@ -331,15 +331,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, int to_write = req_data[4]; if (blk_rq_bytes(req) < base_frame_size + to_write * 4 || - req->resid_len < base_frame_size + to_write * 4) { + scsi_req(req)->resid_len < base_frame_size + to_write * 4) { resp_data[2] = SMP_RESP_INV_FRM_LEN; break; } to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2], req_data[3], to_write, &req_data[8]); - req->resid_len -= base_frame_size + to_write * 
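/*
 * Another mechanical sweep, visible in the libsas, mpt3sas and qla2xxx
 * hunks: sense, sense_len and resid_len move from struct request into
 * the struct scsi_request container, reached through the scsi_req()
 * accessor, e.g.
 *
 *   scsi_req(rsp)->resid_len -= 8;
 *   memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
 *
 * The arithmetic and buffer contents are unchanged; only the field
 * location differs.
 */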
4; - rsp->resid_len -= 8; + scsi_req(req)->resid_len -= base_frame_size + to_write * 4; + scsi_req(rsp)->resid_len -= 8; break; } @@ -348,13 +348,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_PHY_CONTROL: - req->resid_len -= 44; - if ((int)req->resid_len < 0) { - req->resid_len = 0; + scsi_req(req)->resid_len -= 44; + if ((int)scsi_req(req)->resid_len < 0) { + scsi_req(req)->resid_len = 0; error = -EINVAL; goto out; } - rsp->resid_len -= 8; + scsi_req(rsp)->resid_len -= 8; sas_phy_control(sas_ha, req_data[9], req_data[10], req_data[32] >> 4, req_data[33] >> 4, resp_data); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 0b5b423b1db0..c6d550551504 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4723,7 +4723,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) * then scsi-ml does not need to handle this misbehavior. */ sector_sz = scmd->device->sector_size; - if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && + if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz && xfer_cnt % sector_sz)) { sdev_printk(KERN_INFO, scmd->device, "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 7f1d5785bc30..e7a7a704a315 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -2057,10 +2057,10 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, ioc->name, __func__, le16_to_cpu(mpi_reply->ResponseDataLength))); - memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); - req->sense_len = sizeof(*mpi_reply); - req->resid_len = 0; - rsp->resid_len -= + memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply)); + scsi_req(req)->sense_len = sizeof(*mpi_reply); + scsi_req(req)->resid_len = 0; + scsi_req(rsp)->resid_len -= le16_to_cpu(mpi_reply->ResponseDataLength); /* check if the resp needs to be copied from the allocated diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index ef99f62831fb..30b905080c61 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -48,6 +48,7 @@ #include <scsi/osd_sense.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_request.h> #include "osd_debug.h" @@ -477,11 +478,13 @@ static void _set_error_resid(struct osd_request *or, struct request *req, { or->async_error = error; or->req_errors = req->errors ? : error; - or->sense_len = req->sense_len; + or->sense_len = scsi_req(req)->sense_len; + if (or->sense_len) + memcpy(or->sense, scsi_req(req)->sense, or->sense_len); if (or->out.req) - or->out.residual = or->out.req->resid_len; + or->out.residual = scsi_req(or->out.req)->resid_len; if (or->in.req) - or->in.residual = or->in.req->resid_len; + or->in.residual = scsi_req(or->in.req)->resid_len; } int osd_execute_request(struct osd_request *or) @@ -1562,10 +1565,11 @@ static struct request *_make_request(struct request_queue *q, bool has_write, struct bio *bio = oii->bio; int ret; - req = blk_get_request(q, has_write ? WRITE : READ, flags); + req = blk_get_request(q, has_write ? 
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, + flags); if (IS_ERR(req)) return req; - blk_rq_set_block_pc(req); + scsi_req_init(req); for_each_bio(bio) { struct bio *bounce_bio = bio; @@ -1599,8 +1603,6 @@ static int _init_blk_request(struct osd_request *or, req->timeout = or->timeout; req->retries = or->retries; - req->sense = or->sense; - req->sense_len = 0; if (has_out) { or->out.req = req; @@ -1612,7 +1614,7 @@ static int _init_blk_request(struct osd_request *or, ret = PTR_ERR(req); goto out; } - blk_rq_set_block_pc(req); + scsi_req_init(req); or->in.req = or->request->next_rq = req; } } else if (has_in) @@ -1699,8 +1701,8 @@ int osd_finalize_request(struct osd_request *or, osd_sec_sign_cdb(&or->cdb, cap_key); - or->request->cmd = or->cdb.buff; - or->request->cmd_len = _osd_req_cdb_len(or); + scsi_req(or->request)->cmd = or->cdb.buff; + scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or); return 0; } diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index e8196c55b633..451de6c5e3c9 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -322,6 +322,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt) /* Wakeup from interrupt */ static void osst_end_async(struct request *req, int update) { + struct scsi_request *rq = scsi_req(req); struct osst_request *SRpnt = req->end_io_data; struct osst_tape *STp = SRpnt->stp; struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; @@ -330,6 +331,8 @@ static void osst_end_async(struct request *req, int update) #if DEBUG STp->write_pending = 0; #endif + if (rq->sense_len) + memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE); if (SRpnt->waiting) complete(SRpnt->waiting); @@ -357,17 +360,20 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd, int use_sg, int timeout, int retries) { struct request *req; + struct scsi_request *rq; struct page **pages = NULL; struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; int err = 0; int write = (data_direction == DMA_TO_DEVICE); - req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL); + req = blk_get_request(SRpnt->stp->device->request_queue, + write ? 
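/*
 * The osst and osd conversions share one allocation recipe: request
 * type selected by operation, the embedded scsi_request initialized,
 * and the CDB copied into it rather than into struct request. A
 * condensed sketch of what osst_execute() now does (error handling
 * elided):
 *
 *   req = blk_get_request(q, write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
 *                         GFP_KERNEL);
 *   if (IS_ERR(req))
 *           return DRIVER_ERROR << 24;
 *   rq = scsi_req(req);
 *   scsi_req_init(req);
 *   rq->cmd_len = cmd_len;
 *   memset(rq->cmd, 0, BLK_MAX_CDB);
 *   memcpy(rq->cmd, cmd, cmd_len);
 *
 * Sense data is copied out of rq->sense at completion time (see
 * osst_end_async() above) instead of pointing rq->sense at a private
 * buffer up front.
 */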
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL); if (IS_ERR(req)) return DRIVER_ERROR << 24; - blk_rq_set_block_pc(req); + rq = scsi_req(req); + scsi_req_init(req); req->rq_flags |= RQF_QUIET; SRpnt->bio = NULL; @@ -404,11 +410,9 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd, goto free_req; } - req->cmd_len = cmd_len; - memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ - memcpy(req->cmd, cmd, req->cmd_len); - req->sense = SRpnt->sense; - req->sense_len = 0; + rq->cmd_len = cmd_len; + memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ + memcpy(rq->cmd, cmd, rq->cmd_len); req->timeout = timeout; req->retries = retries; req->end_io_data = SRpnt; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 1bf8061ff803..40ca75bbcb9d 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -921,7 +921,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t); - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + + fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) + sizeof(struct fc_bsg_reply); memcpy(fw_sts_ptr, response, sizeof(response)); fw_sts_ptr += sizeof(response); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a94b0b6bd030..9281bf47cbed 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1468,7 +1468,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type, sp->handle, comp_status, fw_status[1], fw_status[2], le16_to_cpu(((struct els_sts_entry_24xx *) pkt)->total_byte_count)); - fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); + fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) + + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } else { @@ -1482,7 +1483,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, pkt)->error_subcode_2)); res = DID_ERROR << 16; bsg_reply->reply_payload_rcv_len = 0; - fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); + fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) + + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 02f1de18bc2b..96c33e292eba 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -2244,7 +2244,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, memcpy(fstatus.reserved_3, pkt->reserved_2, 20 * sizeof(uint8_t)); - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + + fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) + sizeof(struct fc_bsg_reply); memcpy(fw_sts_ptr, (uint8_t *)&fstatus, diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 75455d4dab68..7bfbcfa7af40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -98,176 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain); ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); EXPORT_SYMBOL(scsi_sd_pm_domain); -struct scsi_host_cmd_pool { - struct kmem_cache *cmd_slab; - struct kmem_cache *sense_slab; - unsigned int users; - char *cmd_name; - char *sense_name; - unsigned int slab_flags; - gfp_t gfp_mask; -}; - -static struct scsi_host_cmd_pool scsi_cmd_pool = { - .cmd_name = "scsi_cmd_cache", - .sense_name = "scsi_sense_cache", - .slab_flags = SLAB_HWCACHE_ALIGN, -}; - -static 
struct scsi_host_cmd_pool scsi_cmd_dma_pool = { - .cmd_name = "scsi_cmd_cache(DMA)", - .sense_name = "scsi_sense_cache(DMA)", - .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA, - .gfp_mask = __GFP_DMA, -}; - -static DEFINE_MUTEX(host_cmd_pool_mutex); - -/** - * scsi_host_free_command - internal function to release a command - * @shost: host to free the command for - * @cmd: command to release - * - * the command must previously have been allocated by - * scsi_host_alloc_command. - */ -static void -scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) -{ - struct scsi_host_cmd_pool *pool = shost->cmd_pool; - - if (cmd->prot_sdb) - kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); - kmem_cache_free(pool->sense_slab, cmd->sense_buffer); - kmem_cache_free(pool->cmd_slab, cmd); -} - -/** - * scsi_host_alloc_command - internal function to allocate command - * @shost: SCSI host whose pool to allocate from - * @gfp_mask: mask for the allocation - * - * Returns a fully allocated command with sense buffer and protection - * data buffer (where applicable) or NULL on failure - */ -static struct scsi_cmnd * -scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) -{ - struct scsi_host_cmd_pool *pool = shost->cmd_pool; - struct scsi_cmnd *cmd; - - cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); - if (!cmd) - goto fail; - - cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, - gfp_mask | pool->gfp_mask); - if (!cmd->sense_buffer) - goto fail_free_cmd; - - if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) { - cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); - if (!cmd->prot_sdb) - goto fail_free_sense; - } - - return cmd; - -fail_free_sense: - kmem_cache_free(pool->sense_slab, cmd->sense_buffer); -fail_free_cmd: - kmem_cache_free(pool->cmd_slab, cmd); -fail: - return NULL; -} - -/** - * __scsi_get_command - Allocate a struct scsi_cmnd - * @shost: host to transmit command - * @gfp_mask: allocation mask - * - * Description: allocate a struct scsi_cmd from host's slab, recycling from the - * host's free_list if necessary. - */ -static struct scsi_cmnd * -__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) -{ - struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); - - if (unlikely(!cmd)) { - unsigned long flags; - - spin_lock_irqsave(&shost->free_list_lock, flags); - if (likely(!list_empty(&shost->free_list))) { - cmd = list_entry(shost->free_list.next, - struct scsi_cmnd, list); - list_del_init(&cmd->list); - } - spin_unlock_irqrestore(&shost->free_list_lock, flags); - - if (cmd) { - void *buf, *prot; - - buf = cmd->sense_buffer; - prot = cmd->prot_sdb; - - memset(cmd, 0, sizeof(*cmd)); - - cmd->sense_buffer = buf; - cmd->prot_sdb = prot; - } - } - - return cmd; -} - -/** - * scsi_get_command - Allocate and setup a scsi command block - * @dev: parent scsi device - * @gfp_mask: allocator flags - * - * Returns: The allocated scsi command structure. 
-
-/**
- * __scsi_get_command - Allocate a struct scsi_cmnd
- * @shost: host to transmit command
- * @gfp_mask: allocation mask
- *
- * Description: allocate a struct scsi_cmd from host's slab, recycling from the
- * host's free_list if necessary.
- */
-static struct scsi_cmnd *
-__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
-
- if (unlikely(!cmd)) {
- unsigned long flags;
-
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (likely(!list_empty(&shost->free_list))) {
- cmd = list_entry(shost->free_list.next,
- struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
-
- if (cmd) {
- void *buf, *prot;
-
- buf = cmd->sense_buffer;
- prot = cmd->prot_sdb;
-
- memset(cmd, 0, sizeof(*cmd));
-
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- }
- }
-
- return cmd;
-}
-
-/**
- * scsi_get_command - Allocate and setup a scsi command block
- * @dev: parent scsi device
- * @gfp_mask: allocator flags
- *
- * Returns: The allocated scsi command structure.
- */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
-{
- struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
- unsigned long flags;
-
- if (unlikely(cmd == NULL))
- return NULL;
-
- cmd->device = dev;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
- cmd->jiffies_at_alloc = jiffies;
- return cmd;
-}
-
-/**
- * __scsi_put_command - Free a struct scsi_cmnd
- * @shost: dev->host
- * @cmd: Command to free
- */
-static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- unsigned long flags;
-
- if (unlikely(list_empty(&shost->free_list))) {
- spin_lock_irqsave(&shost->free_list_lock, flags);
- if (list_empty(&shost->free_list)) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
- }
-
- if (likely(cmd != NULL))
- scsi_host_free_command(shost, cmd);
-}
-
/**
* scsi_put_command - Free a scsi command block
* @cmd: command block to free
@@ -287,188 +117,6 @@ void scsi_put_command(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
BUG_ON(delayed_work_pending(&cmd->abort_work));
-
- __scsi_put_command(cmd->device->host, cmd);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_find_host_cmd_pool(struct Scsi_Host *shost)
-{
- if (shost->hostt->cmd_size)
- return shost->hostt->cmd_pool;
- if (shost->unchecked_isa_dma)
- return &scsi_cmd_dma_pool;
- return &scsi_cmd_pool;
-}
-
-static void
-scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
-{
- kfree(pool->sense_name);
- kfree(pool->cmd_name);
- kfree(pool);
-}
-
-static struct scsi_host_cmd_pool *
-scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool)
- return NULL;
-
- pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
- pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
- if (!pool->cmd_name || !pool->sense_name) {
- scsi_free_host_cmd_pool(pool);
- return NULL;
- }
-
- pool->slab_flags = SLAB_HWCACHE_ALIGN;
- if (shost->unchecked_isa_dma) {
- pool->slab_flags |= SLAB_CACHE_DMA;
- pool->gfp_mask = __GFP_DMA;
- }
-
- if (hostt->cmd_size)
- hostt->cmd_pool = pool;
-
- return pool;
-}
-
-static struct scsi_host_cmd_pool *
-scsi_get_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *retval = NULL, *pool;
- size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
-
- /*
- * Select a command slab for this host and create it if not
- * yet existent.
- */
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
- if (!pool) {
- pool = scsi_alloc_host_cmd_pool(shost);
- if (!pool)
- goto out;
- }
-
- if (!pool->users) {
- pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
- pool->slab_flags, NULL);
- if (!pool->cmd_slab)
- goto out_free_pool;
-
- pool->sense_slab = kmem_cache_create(pool->sense_name,
- SCSI_SENSE_BUFFERSIZE, 0,
- pool->slab_flags, NULL);
- if (!pool->sense_slab)
- goto out_free_slab;
- }
-
- pool->users++;
- retval = pool;
-out:
- mutex_unlock(&host_cmd_pool_mutex);
- return retval;
-
-out_free_slab:
- kmem_cache_destroy(pool->cmd_slab);
-out_free_pool:
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- goto out;
-}
-
-static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
-{
- struct scsi_host_template *hostt = shost->hostt;
- struct scsi_host_cmd_pool *pool;
-
- mutex_lock(&host_cmd_pool_mutex);
- pool = scsi_find_host_cmd_pool(shost);
-
- /*
- * This may happen if a driver has a mismatched get and put
- * of the command pool; the driver should be implicated in
- * the stack trace
- */
- BUG_ON(pool->users == 0);
-
- if (!--pool->users) {
- kmem_cache_destroy(pool->cmd_slab);
- kmem_cache_destroy(pool->sense_slab);
- if (hostt->cmd_size) {
- scsi_free_host_cmd_pool(pool);
- hostt->cmd_pool = NULL;
- }
- }
- mutex_unlock(&host_cmd_pool_mutex);
-}
-
-/**
- * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
- * @shost: host to allocate the freelist for.
- *
- * Description: The command freelist protects against system-wide out of memory
- * deadlock by preallocating one SCSI command structure for each host, so the
- * system can always write to a swap file on a device associated with that host.
- *
- * Returns: Nothing.
- */
-int scsi_setup_command_freelist(struct Scsi_Host *shost)
-{
- const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
- struct scsi_cmnd *cmd;
-
- spin_lock_init(&shost->free_list_lock);
- INIT_LIST_HEAD(&shost->free_list);
-
- shost->cmd_pool = scsi_get_host_cmd_pool(shost);
- if (!shost->cmd_pool)
- return -ENOMEM;
-
- /*
- * Get one backup command for this host.
- */
- cmd = scsi_host_alloc_command(shost, gfp_mask);
- if (!cmd) {
- scsi_put_host_cmd_pool(shost);
- shost->cmd_pool = NULL;
- return -ENOMEM;
- }
- list_add(&cmd->list, &shost->free_list);
- return 0;
-}
-
-/**
- * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
- * @shost: host whose freelist is going to be destroyed
- */
-void scsi_destroy_command_freelist(struct Scsi_Host *shost)
-{
- /*
- * If cmd_pool is NULL the free list was not initialized, so
- * do not attempt to release resources.
- */
- if (!shost->cmd_pool)
- return;
-
- while (!list_empty(&shost->free_list)) {
- struct scsi_cmnd *cmd;
-
- cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
- list_del_init(&cmd->list);
- scsi_host_free_command(shost, cmd);
- }
- shost->cmd_pool = NULL;
- scsi_put_host_cmd_pool(shost);
}
#ifdef CONFIG_SCSI_LOGGING
@@ -590,7 +238,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"(result %x)\n", cmd->result));
good_bytes = scsi_bufflen(cmd);
- if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
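Every conversion in this hunk asks the new generic question instead of comparing against the removed cmd_type values: blk_rq_is_passthrough() is true for SG_IO/bsg-style requests and false for normal filesystem I/O. A sketch of the idiom, with a hypothetical completion hook:

    /* Sketch: the old REQ_TYPE_BLOCK_PC / REQ_TYPE_FS split, post-conversion. */
    static void example_done(struct request *req)
    {
            if (blk_rq_is_passthrough(req)) {
                    /* raw CDB from SG_IO/bsg; no ULD translation applied */
            } else {
                    /* regular filesystem read/write built from a bio */
            }
    }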
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 996e134d79fa..9e82fa5715bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1106,7 +1106,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
{
- if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (!blk_rq_is_passthrough(scmd->request)) {
struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
if (sdrv->eh_action)
rtn = sdrv->eh_action(scmd, rtn);
@@ -1746,7 +1746,7 @@ check_type:
* the check condition was retryable.
*/
if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
- scmd->request->cmd_type == REQ_TYPE_BLOCK_PC)
+ blk_rq_is_passthrough(scmd->request))
return 1;
else
return 0;
@@ -1968,25 +1968,25 @@ static void eh_lock_door_done(struct request *req, int uptodate)
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
struct request *req;
+ struct scsi_request *rq;
/*
* blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
* request becomes available
*/
- req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return;
+ rq = scsi_req(req);
+ scsi_req_init(req);
- blk_rq_set_block_pc(req);
-
- req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- req->cmd[1] = 0;
- req->cmd[2] = 0;
- req->cmd[3] = 0;
- req->cmd[4] = SCSI_REMOVAL_PREVENT;
- req->cmd[5] = 0;
-
- req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+ rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ rq->cmd[1] = 0;
+ rq->cmd[2] = 0;
+ rq->cmd[3] = 0;
+ rq->cmd[4] = SCSI_REMOVAL_PREVENT;
+ rq->cmd[5] = 0;
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
@@ -2331,7 +2331,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
{
struct scsi_cmnd *scmd;
struct Scsi_Host *shost = dev->host;
- struct request req;
+ struct request *rq;
unsigned long flags;
int error = 0, rtn, val;
@@ -2346,14 +2346,16 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
return -EIO;
error = -EIO;
- scmd = scsi_get_command(dev, GFP_KERNEL);
- if (!scmd)
+ rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
+ shost->hostt->cmd_size, GFP_KERNEL);
+ if (!rq)
goto out_put_autopm_host;
+ blk_rq_init(NULL, rq);
- blk_rq_init(NULL, &req);
- scmd->request = &req;
-
- scmd->cmnd = req.cmd;
+ scmd = (struct scsi_cmnd *)(rq + 1);
+ scsi_init_command(dev, scmd);
+ scmd->request = rq;
+ scmd->cmnd = scsi_req(rq)->cmd;
scmd->scsi_done = scsi_reset_provider_done_command;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -2413,6 +2415,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scsi_run_host_queues(shost);
scsi_put_command(scmd);
+ kfree(rq);
out_put_autopm_host:
scsi_autopm_put_host(shost);
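The scsi_ioctl_reset() rework above replaces the old on-stack struct request with a single heap allocation that carries the request, the scsi_cmnd and the host's per-command data back to back. The layout logic, restated from the hunk with commentary:

    /* one blob: | struct request | struct scsi_cmnd | cmd_size bytes | */
    rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
                 shost->hostt->cmd_size, GFP_KERNEL);
    blk_rq_init(NULL, rq);               /* NULL queue: never actually queued */
    scmd = (struct scsi_cmnd *)(rq + 1); /* pointer math past the request header */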
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 78db07fd8055..912fbc3b4543 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -37,8 +37,59 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+static struct kmem_cache *scsi_sdb_cache;
+static struct kmem_cache *scsi_sense_cache;
+static struct kmem_cache *scsi_sense_isadma_cache;
+static DEFINE_MUTEX(scsi_sense_cache_mutex);
-struct kmem_cache *scsi_sdb_cache;
+static inline struct kmem_cache *
+scsi_select_sense_cache(struct Scsi_Host *shost)
+{
+ return shost->unchecked_isa_dma ?
+ scsi_sense_isadma_cache : scsi_sense_cache;
+}
+
+static void scsi_free_sense_buffer(struct Scsi_Host *shost,
+ unsigned char *sense_buffer)
+{
+ kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+}
+
+static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+ gfp_t gfp_mask, int numa_node)
+{
+ return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
+ numa_node);
+}
+
+int scsi_init_sense_cache(struct Scsi_Host *shost)
+{
+ struct kmem_cache *cache;
+ int ret = 0;
+
+ cache = scsi_select_sense_cache(shost);
+ if (cache)
+ return 0;
+
+ mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ if (!scsi_sense_isadma_cache)
+ ret = -ENOMEM;
+ } else {
+ scsi_sense_cache =
+ kmem_cache_create("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+}
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
@@ -168,22 +219,23 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req_flags_t rq_flags, int *resid)
{
struct request *req;
- int write = (data_direction == DMA_TO_DEVICE);
+ struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
+ req = blk_get_request(sdev->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
goto out;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
@@ -200,11 +252,13 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
- memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+ if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
+ memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
if (resid)
- *resid = req->resid_len;
+ *resid = rq->resid_len;
+ if (sense && rq->sense_len)
+ memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
ret = req->errors;
out:
blk_put_request(req);
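Callers of __scsi_execute() no longer hand the block layer a sense buffer up front; sense data is copied out of the scsi_request after completion, and the residual moves with it. A sketch of the caller-visible contract (field names as in the hunk; req->errors is still the 4.11-era status word):

    /* Sketch: collect status, residual and sense after completion. */
    static int example_collect_result(struct request *req,
                                      unsigned char *sense, int *resid)
    {
            struct scsi_request *rq = scsi_req(req);

            if (sense && rq->sense_len)
                    memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
            if (resid)
                    *resid = rq->resid_len;
            return req->errors;
    }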
@@ -529,7 +583,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
- if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(cmd->request)) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
@@ -645,14 +699,13 @@ static bool scsi_end_request(struct request *req, int error,
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
+ scsi_release_buffers(cmd);
+ scsi_put_command(cmd);
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_release_buffers(cmd);
-
- scsi_put_command(cmd);
scsi_run_queue(q);
}
@@ -754,18 +807,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
- if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
+ if (blk_rq_is_passthrough(req)) {
if (result) {
- if (sense_valid && req->sense) {
+ if (sense_valid) {
/*
* SG_IO wants current and deferred errors
*/
- int len = 8 + cmd->sense_buffer[7];
-
- if (len > SCSI_SENSE_BUFFERSIZE)
- len = SCSI_SENSE_BUFFERSIZE;
- memcpy(req->sense, cmd->sense_buffer, len);
- req->sense_len = len;
+ scsi_req(req)->sense_len =
+ min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
error = __scsi_error_from_host_byte(cmd, result);
@@ -775,14 +825,14 @@
*/
req->errors = cmd->result;
- req->resid_len = scsi_get_resid(cmd);
+ scsi_req(req)->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
/*
* Bidi commands must be complete as a whole,
* both sides at once.
*/
- req->next_rq->resid_len = scsi_in(cmd)->resid;
+ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
if (scsi_end_request(req, 0, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
@@ -790,15 +840,14 @@
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
- * Certain non BLOCK_PC requests are commands that don't
- * actually transfer anything (FLUSH), so cannot use
+ * Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
error = __scsi_error_from_host_byte(cmd, result);
}
- /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+ /* no bidi support for !blk_rq_is_passthrough yet */
BUG_ON(blk_bidi_rq(req));
/*
@@ -810,8 +859,8 @@
blk_rq_sectors(req), good_bytes));
/*
- * Recovered errors need reporting, but they're always treated
- * as success, so fiddle the result code here. For BLOCK_PC
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
@@ -825,7 +874,7 @@
else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
- /* BLOCK_PC may have set error */
+ /* for passthrough error may be set */
error = 0;
}
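The min() in the scsi_io_completion() hunk encodes the fixed-format sense layout: byte 7 of the sense buffer holds the additional sense length, so the whole block is 8 header bytes plus that count. Annotated:

    /* fixed-format sense: 8-byte header + "additional length" in byte 7;
     * e.g. 0x0a in byte 7 means 8 + 10 = 18 bytes of valid sense data.
     * The clamp keeps a corrupt length byte from overrunning the 96-byte
     * kernel sense buffer. */
    scsi_req(req)->sense_len = min(8 + cmd->sense_buffer[7],
                                   SCSI_SENSE_BUFFERSIZE);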
@@ -1110,42 +1159,33 @@ err_exit:
}
EXPORT_SYMBOL(scsi_init_io);
-static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
- struct request *req)
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- struct scsi_cmnd *cmd;
-
- if (!req->special) {
- /* Bail if we can't get a reference to the device */
- if (!get_device(&sdev->sdev_gendev))
- return NULL;
-
- cmd = scsi_get_command(sdev, GFP_ATOMIC);
- if (unlikely(!cmd)) {
- put_device(&sdev->sdev_gendev);
- return NULL;
- }
- req->special = cmd;
- } else {
- cmd = req->special;
- }
+ void *buf = cmd->sense_buffer;
+ void *prot = cmd->prot_sdb;
+ unsigned long flags;
- /* pull a tag out of the request if we have one */
- cmd->tag = req->tag;
- cmd->request = req;
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
- cmd->cmnd = req->cmd;
- cmd->prot_op = SCSI_PROT_NORMAL;
+ cmd->device = dev;
+ cmd->sense_buffer = buf;
+ cmd->prot_sdb = prot;
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ cmd->jiffies_at_alloc = jiffies;
- return cmd;
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_add_tail(&cmd->list, &dev->cmd_list);
+ spin_unlock_irqrestore(&dev->list_lock, flags);
}
-static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
/*
- * BLOCK_PC requests may transfer data, in which case they must
+ * Passthrough requests may transfer data, in which case they must
* have a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
*/
@@ -1160,14 +1200,15 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = req->cmd_len;
+ cmd->cmd_len = scsi_req(req)->cmd_len;
+ cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
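scsi_init_command() relies on a layout trick that scsi_mq_prep_fn() repeats later in this patch: struct scsi_cmnd now embeds its scsi_request as the leading member (cmd->req), owned by the block layer, so re-initialisation must only clear the driver-owned tail. Restated with commentary, assuming (as the patch does) that the embedded request stays the first member:

    /* would trip at build time if the layout assumption ever broke */
    BUILD_BUG_ON(offsetof(struct scsi_cmnd, req) != 0);

    /* zero everything after the embedded scsi_request, nothing before it */
    memset((char *)cmd + sizeof(cmd->req), 0,
           sizeof(*cmd) - sizeof(cmd->req));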
/*
- * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * Setup a normal block command. These are simple requests from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1180,6 +1221,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return ret;
}
+ cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
@@ -1195,14 +1237,10 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+ else
return scsi_setup_fs_cmnd(sdev, req);
- case REQ_TYPE_BLOCK_PC:
- return scsi_setup_blk_pc_cmnd(sdev, req);
- default:
- return BLKPREP_KILL;
- }
}
static int
@@ -1298,19 +1336,28 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
int ret;
ret = scsi_prep_state_check(sdev, req);
if (ret != BLKPREP_OK)
goto out;
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd)) {
- ret = BLKPREP_DEFER;
- goto out;
+ if (!req->special) {
+ /* Bail if we can't get a reference to the device */
+ if (unlikely(!get_device(&sdev->sdev_gendev))) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ scsi_init_command(sdev, cmd);
+ req->special = cmd;
}
+ cmd->tag = req->tag;
+ cmd->request = req;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
ret = scsi_setup_cmnd(sdev, req);
out:
return scsi_prep_return(q, req, ret);
@@ -1827,7 +1874,9 @@ static int scsi_mq_prep_fn(struct request *req)
unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
- memset(cmd, 0, sizeof(struct scsi_cmnd));
+ /* zero out the cmd, except for the embedded scsi_request */
+ memset((char *)cmd + sizeof(cmd->req), 0,
+ sizeof(*cmd) - sizeof(cmd->req));
req->special = cmd;
@@ -1837,7 +1886,6 @@ static int scsi_mq_prep_fn(struct request *req)
cmd->tag = req->tag;
- cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL;
INIT_LIST_HEAD(&cmd->list);
@@ -1912,7 +1960,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
-
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret != BLK_MQ_RQ_QUEUE_OK)
@@ -1982,21 +2029,24 @@ static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
- numa_node);
+ cmd->sense_buffer =
+ scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
+ cmd->req.sense = cmd->sense_buffer;
return 0;
}
static void scsi_exit_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- kfree(cmd->sense_buffer);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
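On the blk-mq side the per-command sense buffer is attached through the tag set's request constructors shown above. A sketch of how the pair plugs into blk_mq_ops on a 4.11-era tree (all other members elided):

    static const struct blk_mq_ops example_scsi_mq_ops = {
            /* .queue_rq, .complete, .timeout, ... elided ... */
            .init_request   = scsi_init_request,  /* allocate sense buffer once */
            .exit_request   = scsi_exit_request,  /* release it at teardown */
    };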
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2029,7 +2079,7 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return bounce_limit;
}
-static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
struct device *dev = shost->dma_dev;
@@ -2064,28 +2114,64 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
*/
blk_queue_dma_alignment(q, 0x03);
}
+EXPORT_SYMBOL_GPL(__scsi_init_queue);
-struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
- request_fn_proc *request_fn)
+static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
- struct request_queue *q;
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- q = blk_init_queue(request_fn, NULL);
- if (!q)
- return NULL;
- __scsi_init_queue(shost, q);
- return q;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+ if (!cmd->sense_buffer)
+ goto fail;
+ cmd->req.sense = cmd->sense_buffer;
+
+ if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+ cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
+ if (!cmd->prot_sdb)
+ goto fail_free_sense;
+ }
+
+ return 0;
+
+fail_free_sense:
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
+fail:
+ return -ENOMEM;
+}
+
+static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+{
+ struct Scsi_Host *shost = q->rq_alloc_data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (cmd->prot_sdb)
+ kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
-EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
+ struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
if (!q)
return NULL;
+ q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+ q->rq_alloc_data = shost;
+ q->request_fn = scsi_request_fn;
+ q->init_rq_fn = scsi_init_rq;
+ q->exit_rq_fn = scsi_exit_rq;
+
+ if (blk_init_allocated_queue(q) < 0) {
+ blk_cleanup_queue(q);
+ return NULL;
+ }
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_unprep_rq(q, scsi_unprep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
@@ -2209,6 +2295,8 @@ int __init scsi_init_queue(void)
void scsi_exit_queue(void)
{
+ kmem_cache_destroy(scsi_sense_cache);
+ kmem_cache_destroy(scsi_sense_isadma_cache);
kmem_cache_destroy(scsi_sdb_cache);
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 193636a59adf..99bfc985e190 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -30,8 +30,8 @@ extern void scsi_exit_hosts(void);
/* scsi.c */
extern bool scsi_use_blk_mq;
-extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
-extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
+int scsi_init_sense_cache(struct Scsi_Host *shost);
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -96,7 +96,6 @@ extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
struct request_queue;
struct request;
-extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
#ifdef CONFIG_SCSI_PROC_FS
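scsi_alloc_queue() now assembles the legacy queue by hand so that every request is allocated with a trailing scsi_cmnd and host-specific data, mirroring what blk-mq already does with its PDU. The setup sequence from the hunk, annotated:

    q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
    if (!q)
            return NULL;
    q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
    q->rq_alloc_data = shost;          /* handed to init_rq_fn/exit_rq_fn */
    q->request_fn = scsi_request_fn;
    q->init_rq_fn = scsi_init_rq;      /* sense + protection buffers */
    q->exit_rq_fn = scsi_exit_rq;
    if (blk_init_allocated_queue(q) < 0) {
            blk_cleanup_queue(q);      /* unwinds any init_rq_fn work too */
            return NULL;
    }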
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 03577bde6ac5..13dcb9ba823c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3765,7 +3765,6 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
struct device *dev = &shost->shost_gendev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
char bsg_name[20];
fc_host->rqst_q = NULL;
@@ -3776,23 +3775,14 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev,
- "fc_host%d: bsg interface failed to initialize - no request queue\n",
- shost->host_no);
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
- i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
shost->host_no);
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
@@ -3824,26 +3814,18 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
struct device *dev = &rport->dev;
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
- int err;
rport->rqst_q = NULL;
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q) {
- dev_err(dev, "bsg interface failed to initialize - no request queue\n");
- return -ENOMEM;
- }
-
- err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
- if (err) {
+ q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
+ if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
- blk_cleanup_queue(q);
- return err;
+ return PTR_ERR(q);
}
-
+ __scsi_init_queue(shost, q);
blk_queue_prep_rq(q, fc_bsg_rport_prep);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 42bca619f854..568c9f26a561 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1537,24 +1537,18 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
struct request_queue *q;
char bsg_name[20];
- int ret;
if (!i->iscsi_transport->bsg_request)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-
- q = __scsi_alloc_queue(shost, bsg_request_fn);
- if (!q)
- return -ENOMEM;
-
- ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
- if (ret) {
+ q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+ if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
- blk_cleanup_queue(q);
- return ret;
+ return PTR_ERR(q);
}
+ __scsi_init_queue(shost, q);
ihost->bsg_q = q;
return 0;
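Both transport classes converge on the same calling convention: bsg_setup_queue() now allocates the queue itself and returns it (or an ERR_PTR), and the caller merely layers the SCSI limits on top via the newly exported __scsi_init_queue(). A sketch, with example_dispatch standing in for a transport's dispatch handler:

    struct request_queue *q;

    q = bsg_setup_queue(dev, "example_bsg", example_dispatch, 0);
    if (IS_ERR(q))
            return PTR_ERR(q);
    __scsi_init_queue(shost, q);    /* queue limits, DMA alignment, etc. */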
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 60b651bfaa01..126a5ee00987 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
#include <linux/bsg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -177,6 +178,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
while ((req = blk_fetch_request(q)) != NULL) {
spin_unlock_irq(q->queue_lock);
+ scsi_req(req)->resid_len = blk_rq_bytes(req);
+ if (req->next_rq)
+ scsi_req(req->next_rq)->resid_len =
+ blk_rq_bytes(req->next_rq);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
req->errors = ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1f5d92a25a49..40b4038c019e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -781,7 +781,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
rq->special_vec.bv_len = len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->resid_len = len;
+ scsi_req(rq)->resid_len = len;
ret = scsi_init_io(cmd);
out:
@@ -1179,7 +1179,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
- if (SCpnt->cmnd != rq->cmd) {
+ if (SCpnt->cmnd != scsi_req(rq)->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
@@ -1750,9 +1750,6 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
unsigned int good_bytes;
- if (scmd->request->cmd_type != REQ_TYPE_FS)
- return 0;
-
info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
&bad_lba);
@@ -3082,6 +3079,23 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
+struct sd_devt {
+ int idx;
+ struct disk_devt disk_devt;
+};
+
+void sd_devt_release(struct disk_devt *disk_devt)
+{
+ struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
+ disk_devt);
+
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sd_devt->idx);
+ spin_unlock(&sd_index_lock);
+
+ kfree(sd_devt);
+}
+
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3103,6 +3117,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+ struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3128,9 +3143,13 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
+ sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
+ if (!sd_devt)
+ goto out_free;
+
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free;
+ goto out_free_devt;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3146,6 +3165,11 @@ static int sd_probe(struct device *dev)
goto out_put;
}
+ atomic_set(&sd_devt->disk_devt.count, 1);
+ sd_devt->disk_devt.release = sd_devt_release;
+ sd_devt->idx = index;
+ gd->disk_devt = &sd_devt->disk_devt;
+
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3185,13 +3209,14 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, index);
- spin_unlock(&sd_index_lock);
+ put_disk_devt(&sd_devt->disk_devt);
+ sd_devt = NULL;
out_put:
put_disk(gd);
out_free:
kfree(sdkp);
+ out_free_devt:
+ kfree(sd_devt);
out:
scsi_autopm_put_device(sdp);
return error;
@@ -3250,10 +3275,7 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sdkp->index);
- spin_unlock(&sd_index_lock);
-
+ put_disk_devt(disk->disk_devt);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
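The sd_devt change fixes the index lifetime the same way at both call sites: the ida index is released from a refcounted disk_devt, so it cannot be recycled while a late udev lookup might still resolve the old dev_t. The release callback restated with commentary (example_devt_release is just a renamed copy of sd_devt_release() above):

    static void example_devt_release(struct disk_devt *disk_devt)
    {
            struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
                                                   disk_devt);

            spin_lock(&sd_index_lock);
            ida_remove(&sd_index_ida, sd_devt->idx); /* index freed only here */
            spin_unlock(&sd_index_lock);
            kfree(sd_devt);
    }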
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 121de0aaa6ad..e831e01f9fa6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -781,9 +781,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
-
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_end_request_all(srp->rq, -EIO);
srp->rq = NULL;
}
@@ -1279,6 +1277,7 @@
static void
sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
+ struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
@@ -1297,9 +1296,9 @@ sg_rq_end_io(struct request *rq, int uptodate)
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = rq->sense;
+ sense = req->sense;
result = rq->errors;
- resid = rq->resid_len;
+ resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1333,6 +1332,10 @@ sg_rq_end_io(struct request *rq, int uptodate)
sdp->device->changed = 1;
}
}
+
+ if (req->sense_len)
+ memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+
/* Rely on write phase to clean out srp status values, so no "else" */
/*
@@ -1342,8 +1345,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- if (rq->cmd != rq->__cmd)
- kfree(rq->cmd);
+ scsi_req_free_cmd(scsi_req(rq));
__blk_put_request(rq->q, rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
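sg keeps supporting CDBs longer than BLK_MAX_CDB (16 bytes) by pointing the scsi_request's cmd at a separately allocated buffer; scsi_req_free_cmd() frees it only when cmd no longer points at the inline __cmd array. The ownership rule, sketched from sg_start_req() and sg_finish_rem_req():

    if (hp->cmd_len > BLK_MAX_CDB)       /* long CDB: use the kmalloc'ed buffer */
            req->cmd = long_cmdp;        /* otherwise req->cmd == req->__cmd */
    memcpy(req->cmd, cmd, hp->cmd_len);
    req->cmd_len = hp->cmd_len;

    /* later, on teardown: kfree()s req->cmd iff it is not the inline array */
    scsi_req_free_cmd(req);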
@@ -1658,6 +1660,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
+ struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
@@ -1695,22 +1698,23 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
+ req = scsi_req(rq);
- blk_rq_set_block_pc(rq);
+ scsi_req_init(rq);
if (hp->cmd_len > BLK_MAX_CDB)
- rq->cmd = long_cmdp;
- memcpy(rq->cmd, cmd, hp->cmd_len);
- rq->cmd_len = hp->cmd_len;
+ req->cmd = long_cmdp;
+ memcpy(req->cmd, cmd, hp->cmd_len);
+ req->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
@@ -1790,8 +1794,7 @@ sg_finish_rem_req(Sg_request *srp)
ret = blk_rq_unmap_user(srp->bio);
if (srp->rq) {
- if (srp->rq->cmd != srp->rq->__cmd)
- kfree(srp->rq->cmd);
+ scsi_req_free_cmd(scsi_req(srp->rq));
blk_put_request(srp->rq);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8702d9cf8040..11c0dfb3dfa3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4499,7 +4499,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->offload_enabled &&
- scmd->request->cmd_type == REQ_TYPE_FS) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 ||
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 94352e4df831..0b29b9329b1c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -117,7 +117,7 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
static int sr_packet(struct cdrom_device_info *, struct packet_command *);
-static struct cdrom_device_ops sr_dops = {
+static const struct cdrom_device_ops sr_dops = {
.open = sr_open,
.release = sr_release,
.drive_status = sr_drive_status,
@@ -437,14 +437,17 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
goto out;
}
- if (rq_data_dir(rq) == WRITE) {
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
if (!cd->writeable)
goto out;
SCpnt->cmnd[0] = WRITE_10;
cd->cdi.media_written = 1;
- } else if (rq_data_dir(rq) == READ) {
+ break;
+ case REQ_OP_READ:
SCpnt->cmnd[0] = READ_10;
- } else {
+ break;
+ default:
blk_dump_rq_flags(rq, "Unknown sr command");
goto out;
}
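sr_init_command() now switches on req_op() rather than rq_data_dir(), so operations that are neither a data read nor a data write can no longer be mistaken for one. The idiom, restated with commentary:

    switch (req_op(rq)) {
    case REQ_OP_WRITE:                   /* real write: WRITE_10 CDB */
            SCpnt->cmnd[0] = WRITE_10;
            break;
    case REQ_OP_READ:                    /* real read: READ_10 CDB */
            SCpnt->cmnd[0] = READ_10;
            break;
    default:                             /* anything else: reject the request */
            blk_dump_rq_flags(rq, "Unknown sr command");
            goto out;
    }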
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5f35b863e1a7..81212d4bd9bf 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -475,7 +475,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
ktime_t now;
now = ktime_get();
- if (req->cmd[0] == WRITE_6) {
+ if (scsi_req(req)->cmd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -489,7 +489,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (req->cmd[0] == READ_6) {
+ } else if (scsi_req(req)->cmd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -514,15 +514,18 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
static void st_scsi_execute_end(struct request *req, int uptodate)
{
struct st_request *SRpnt = req->end_io_data;
+ struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
- STp->buffer->cmdstat.residual = req->resid_len;
+ STp->buffer->cmdstat.residual = rq->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
+ if (rq->sense_len)
+ memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
@@ -535,17 +538,18 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int timeout, int retries)
{
struct request *req;
+ struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
- int write = (data_direction == DMA_TO_DEVICE);
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue, write,
- GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
-
- blk_rq_set_block_pc(req);
+ rq = scsi_req(req);
+ scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
@@ -571,11 +575,9 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
SRpnt->bio = req->bio;
- req->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(req->cmd, 0, BLK_MAX_CDB);
- memcpy(req->cmd, cmd, req->cmd_len);
- req->sense = SRpnt->sense;
- req->sense_len = 0;
+ rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ memset(rq->cmd, 0, BLK_MAX_CDB);
+ memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
req->retries = retries;
req->end_io_data = SRpnt;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 88db6992420e..bcf7d05d1aab 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -260,7 +260,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
{
int wanted_len = cmd->SCp.this_residual;
- if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
+ if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
return 0;
return wanted_len;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 257361280510..e2bc99980f75 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -4,6 +4,7 @@ menuconfig TARGET_CORE
depends on SCSI && BLOCK
select CONFIGFS_FS
select CRC_T10DIF
+ select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 04d7aa7390d0..a8f8e53f2f57 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
scsi_command_size(cmd->t_task_cdb));
req = blk_get_request(pdv->pdv_sd->request_queue,
- (cmd->data_direction == DMA_TO_DEVICE),
+ cmd->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
@@ -1013,7 +1014,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
goto fail;
}
- blk_rq_set_block_pc(req);
+ scsi_req_init(req);
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
@@ -1023,10 +1024,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
+ scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ scsi_req(req)->cmd = &pt->pscsi_cdb[0];
if (pdv->pdv_sd->type == TYPE_DISK)
req->timeout = PS_TIMEOUT_DISK;
else
@@ -1075,7 +1074,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
struct pscsi_plugin_task *pt = cmd->priv;
pt->pscsi_result = req->errors;
- pt->pscsi_resid = req->resid_len;
+ pt->pscsi_resid = scsi_req(req)->resid_len;
cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
if (cmd->scsi_status) {
@@ -1096,6 +1095,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
break;
}
+ memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
__blk_put_request(req->q, req);
kfree(pt);
}