author     Linus Torvalds <torvalds@linux-foundation.org>   2017-02-21 10:57:33 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-02-21 10:57:33 -0800
commit     772c8f6f3bbd3ceb94a89373473083e3e1113554
tree       d2b34e8f1841a169d59adf53074de217a9e0f977 /include
parent     fd4a61e08aa79f2b7835b25c6f94f27bd2d65990
parent     818551e2b2c662a1b26de6b4f7d6b8411a838d18
Merge tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
- blk-mq scheduling framework from me and Omar, with a port of the
deadline scheduler for this framework. A port of BFQ from Paolo is in
the works, and should be ready for 4.12 (the new scheduler hooks are
sketched right after this list).
- Various fixups and improvements to the above scheduling framework
from Omar, Paolo, Bart, me, and others.
- Cleanup of the exported sysfs blk-mq data into debugfs, from Omar.
This allows us to export more information that helps debug hangs or
performance issues, without cluttering or abusing the sysfs API.
- Fixes for the sbitmap code, the scalable bitmap code that was
migrated from blk-mq, from Omar.
- Removal of the BLOCK_PC support in struct request, and refactoring of
carrying SCSI payloads in the block layer. This cleans up the code
nicely, and enables us to kill the SCSI specific parts of struct
request, shrinking it down nicely. From Christoph mainly, with help
from Hannes (a passthrough sketch follows the commit list below).
- Support for ranged discard requests and discard merging, also from
Christoph.
- Support for OPAL in the block layer, and for NVMe as well. Mainly
from Scott Bauer, with fixes/updates from various other folks (a
userspace ioctl sketch appears at the end of this page).
- Error code fixup for gdrom from Christophe.
- cciss pci irq allocation cleanup from Christoph.
- Making the cdrom device operations read only, from Kees Cook.
- Fixes for duplicate bdi registrations and bdi/queue lifetime
problems from Jan and Dan.
- Set of fixes and updates for lightnvm, from Matias and Javier.
- A few fixes for nbd from Josef, using idr to name devices and a
workqueue deadlock fix on receive. Also marks Josef as the current
maintainer of nbd.
- Fix from Josef for queue settings being overwritten when the number
of hardware queues is updated for a blk-mq device.
- NVMe fix from Keith, ensuring that we don't repeatedly mark an IO
aborted, if we didn't end up aborting it.
- SG gap merging fix from Ming Lei for block.
- Loop fix also from Ming, fixing a race and crash between setting loop
status and IO.
- Two block race fixes from Tahsin, fixing request list iteration and
a race between device registration and udev device add
notifications.
- Double free fix in cgroup writeback, from Tejun.
- Another double free fix in blkcg, from Hou Tao.
- Partition overflow fix for EFI from Alden Tondettar.
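
As a rough illustration of the scheduling framework mentioned in the first
item above, here is a minimal no-op blk-mq scheduler wired through the new
elevator_mq_ops hooks added to include/linux/elevator.h in this merge. It is
a sketch only, loosely modeled on the mq-deadline port: the noop_mq_* names
and the "mq-noop" elevator name are made up, it ignores merging and restart,
and a real scheduler would implement more of the hooks.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical per-queue scheduler data, hung off the elevator queue. */
struct noop_mq_data {
	spinlock_t lock;
	struct list_head rqs;
};

static int noop_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq;
	struct noop_mq_data *nd;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	nd = kzalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
	if (!nd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	spin_lock_init(&nd->lock);
	INIT_LIST_HEAD(&nd->rqs);

	eq->elevator_data = nd;
	q->elevator = eq;
	return 0;
}

static void noop_mq_exit_sched(struct elevator_queue *e)
{
	kfree(e->elevator_data);
}

/* Ignores at_head for brevity; a real scheduler must honor it. */
static void noop_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list, bool at_head)
{
	struct noop_mq_data *nd = hctx->queue->elevator->elevator_data;

	spin_lock(&nd->lock);
	list_splice_tail_init(list, &nd->rqs);
	spin_unlock(&nd->lock);
}

static struct request *noop_mq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct noop_mq_data *nd = hctx->queue->elevator->elevator_data;
	struct request *rq = NULL;

	spin_lock(&nd->lock);
	if (!list_empty(&nd->rqs)) {
		rq = list_first_entry(&nd->rqs, struct request, queuelist);
		list_del_init(&rq->queuelist);
	}
	spin_unlock(&nd->lock);
	return rq;
}

static bool noop_mq_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct noop_mq_data *nd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&nd->rqs);
}

static struct elevator_type noop_mq_sched = {
	.ops.mq = {
		.init_sched		= noop_mq_init_sched,
		.exit_sched		= noop_mq_exit_sched,
		.insert_requests	= noop_mq_insert_requests,
		.dispatch_request	= noop_mq_dispatch_request,
		.has_work		= noop_mq_has_work,
	},
	.uses_mq	= true,
	.elevator_name	= "mq-noop",
	.elevator_owner	= THIS_MODULE,
};

static int __init noop_mq_init(void)
{
	return elv_register(&noop_mq_sched);
}
module_init(noop_mq_init);
MODULE_LICENSE("GPL");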
* tag 'for-4.11/linus-merge-signed' of git://git.kernel.dk/linux-block: (156 commits)
nvme: Check for Security send/recv support before issuing commands.
block/sed-opal: allocate struct opal_dev dynamically
block/sed-opal: tone down not supported warnings
block: don't defer flushes on blk-mq + scheduling
blk-mq-sched: ask scheduler for work, if we failed dispatching leftovers
blk-mq: don't special case flush inserts for blk-mq-sched
blk-mq-sched: don't add flushes to the head of requeue queue
blk-mq: have blk_mq_dispatch_rq_list() return if we queued IO or not
block: do not allow updates through sysfs until registration completes
lightnvm: set default lun range when no luns are specified
lightnvm: fix off-by-one error on target initialization
Maintainers: Modify SED list from nvme to block
Move stack parameters for sed_ioctl to prevent oversized stack with CONFIG_KASAN
uapi: sed-opal fix IOW for activate lsp to use correct struct
cdrom: Make device operations read-only
elevator: fix loading wrong elevator type for blk-mq devices
cciss: switch to pci_irq_alloc_vectors
block/loop: fix race between I/O and set_status
blk-mq-sched: don't hold queue_lock when calling exit_icq
block: set make_request_fn manually in blk_mq_update_nr_hw_queues
...
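
To make the BLOCK_PC removal above concrete: the CDB and sense bookkeeping
that used to live in struct request now sits in a struct scsi_request placed
at the front of the driver's per-request payload (see the new
include/scsi/scsi_request.h and the IDE conversion in the diff below). A
rough sketch of the pattern, with my_driver_request as a made-up PDU type:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>	/* SCSI_SENSE_BUFFERSIZE */
#include <scsi/scsi_request.h>

/*
 * Hypothetical per-request PDU; the queue's cmd_size must be
 * sizeof(struct my_driver_request) so the block layer allocates it
 * alongside each struct request.
 */
struct my_driver_request {
	struct scsi_request sreq;	/* must be first so scsi_req() finds it */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
};

static void my_driver_prep_passthrough(struct request *rq, const u8 *cdb,
					unsigned short cdb_len)
{
	struct my_driver_request *mrq = blk_mq_rq_to_pdu(rq);
	struct scsi_request *req = scsi_req(rq);	/* same memory as &mrq->sreq */

	scsi_req_init(rq);			/* reset cmd/sense bookkeeping */

	/* CDBs longer than BLK_MAX_CDB would need a kmalloc'ed req->cmd. */
	memcpy(req->__cmd, cdb, cdb_len);
	req->cmd_len = cdb_len;
	req->sense = mrq->sense;		/* where sense data should land */
}

A passthrough request itself is then obtained via blk_get_request() with the
new REQ_OP_SCSI_IN/REQ_OP_SCSI_OUT opcodes instead of the removed
blk_rq_set_block_pc() step.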
Diffstat (limited to 'include')
 include/linux/backing-dev-defs.h |   2
 include/linux/backing-dev.h      |  12
 include/linux/blk-mq.h           |   9
 include/linux/blk_types.h        |  38
 include/linux/blkdev.h           | 124
 include/linux/blktrace_api.h     |  18
 include/linux/bsg-lib.h          |   5
 include/linux/cdrom.h            |   5
 include/linux/debugfs.h          |   8
 include/linux/device-mapper.h    |   3
 include/linux/elevator.h         |  63
 include/linux/fs.h               |   2
 include/linux/genhd.h            |   8
 include/linux/ide.h              |  58
 include/linux/lightnvm.h         | 138
 include/linux/nvme.h             |   3
 include/linux/sbitmap.h          |  30
 include/linux/sed-opal.h         |  70
 include/scsi/scsi_cmnd.h         |   4
 include/scsi/scsi_host.h         |   5
 include/scsi/scsi_request.h      |  30
 include/scsi/scsi_transport.h    |   2
 include/trace/events/block.h     |  27
 include/uapi/linux/lightnvm.h    |  50
 include/uapi/linux/sed-opal.h    | 119
 25 files changed, 610 insertions, 223 deletions
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index e850e76acaaf..ad955817916d 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -10,6 +10,7 @@ #include <linux/flex_proportions.h> #include <linux/timer.h> #include <linux/workqueue.h> +#include <linux/kref.h> struct page; struct device; @@ -144,6 +145,7 @@ struct backing_dev_info { char *name; + struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 43b93a947e61..c52a48cb9a66 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -18,7 +18,14 @@ #include <linux/slab.h> int __must_check bdi_init(struct backing_dev_info *bdi); -void bdi_exit(struct backing_dev_info *bdi); + +static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) +{ + kref_get(&bdi->refcnt); + return bdi; +} + +void bdi_put(struct backing_dev_info *bdi); __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, @@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi); int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); void bdi_destroy(struct backing_dev_info *bdi); +struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id); void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, bool range_cyclic, enum wb_reason reason); @@ -183,7 +191,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return blk_get_backing_dev_info(I_BDEV(inode)); + return I_BDEV(inode)->bd_bdi; #endif return sb->s_bdi; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4a2ab5d99ff7..8e4df3d6c8cd 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -22,6 +22,7 @@ struct blk_mq_hw_ctx { unsigned long flags; /* BLK_MQ_F_* flags */ + void *sched_data; struct request_queue *queue; struct blk_flush_queue *fq; @@ -35,6 +36,7 @@ struct blk_mq_hw_ctx { atomic_t wait_index; struct blk_mq_tags *tags; + struct blk_mq_tags *sched_tags; struct srcu_struct queue_rq_srcu; @@ -60,7 +62,7 @@ struct blk_mq_hw_ctx { struct blk_mq_tag_set { unsigned int *mq_map; - struct blk_mq_ops *ops; + const struct blk_mq_ops *ops; unsigned int nr_hw_queues; unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; @@ -151,11 +153,13 @@ enum { BLK_MQ_F_SG_MERGE = 1 << 2, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_BLOCKING = 1 << 5, + BLK_MQ_F_NO_SCHED = 1 << 6, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, + BLK_MQ_S_SCHED_RESTART = 2, BLK_MQ_MAX_DEPTH = 10240, @@ -179,14 +183,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_free_request(struct request *rq); -void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); enum { BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */ BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */ + BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */ }; struct request *blk_mq_alloc_request(struct request_queue *q, int rw, diff --git 
a/include/linux/blk_types.h b/include/linux/blk_types.h index 519ea2c9df61..d703acb55d0f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -162,6 +162,13 @@ enum req_opf { /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 8, + /* SCSI passthrough using struct scsi_request */ + REQ_OP_SCSI_IN = 32, + REQ_OP_SCSI_OUT = 33, + /* Driver private requests */ + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + REQ_OP_LAST, }; @@ -221,6 +228,15 @@ static inline bool op_is_write(unsigned int op) } /* + * Check if the bio or request is one that needs special treatment in the + * flush state machine. + */ +static inline bool op_is_flush(unsigned int op) +{ + return op & (REQ_FUA | REQ_PREFLUSH); +} + +/* * Reads are always treated as synchronous, as are requests with the FUA or * PREFLUSH flag. Other operations may be marked as synchronous using the * REQ_SYNC flag. @@ -232,22 +248,29 @@ static inline bool op_is_sync(unsigned int op) } typedef unsigned int blk_qc_t; -#define BLK_QC_T_NONE -1U -#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_NONE -1U +#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { return cookie != BLK_QC_T_NONE; } -static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num) +static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num, + bool internal) { - return tag | (queue_num << BLK_QC_T_SHIFT); + blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT); + + if (internal) + ret |= BLK_QC_T_INTERNAL; + + return ret; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) { - return cookie >> BLK_QC_T_SHIFT; + return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; } static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) @@ -255,6 +278,11 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) return cookie & ((1u << BLK_QC_T_SHIFT) - 1); } +static inline bool blk_qc_t_is_internal(blk_qc_t cookie) +{ + return (cookie & BLK_QC_T_INTERNAL) != 0; +} + struct blk_issue_stat { u64 time; }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1ca8e8fd1078..aecca0e7d9ca 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -71,15 +71,6 @@ struct request_list { }; /* - * request command types - */ -enum rq_cmd_type_bits { - REQ_TYPE_FS = 1, /* fs request */ - REQ_TYPE_BLOCK_PC, /* scsi command */ - REQ_TYPE_DRV_PRIV, /* driver defined types from here */ -}; - -/* * request flags */ typedef __u32 __bitwise req_flags_t; @@ -128,8 +119,6 @@ typedef __u32 __bitwise req_flags_t; #define RQF_NOMERGE_FLAGS \ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) -#define BLK_MAX_CDB 16 - /* * Try to put the fields that are referenced together in the same cacheline. 
* @@ -147,13 +136,16 @@ struct request { struct blk_mq_ctx *mq_ctx; int cpu; - unsigned cmd_type; unsigned int cmd_flags; /* op and common flags */ req_flags_t rq_flags; + + int internal_tag; + unsigned long atomic_flags; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ + int tag; sector_t __sector; /* sector cursor */ struct bio *bio; @@ -222,20 +214,9 @@ struct request { void *special; /* opaque pointer available for LLD use */ - int tag; int errors; - /* - * when request is used as a packet command carrier - */ - unsigned char __cmd[BLK_MAX_CDB]; - unsigned char *cmd; - unsigned short cmd_len; - unsigned int extra_len; /* length of alignment and padding */ - unsigned int sense_len; - unsigned int resid_len; /* residual count */ - void *sense; unsigned long deadline; struct list_head timeout_list; @@ -252,6 +233,21 @@ struct request { struct request *next_rq; }; +static inline bool blk_rq_is_scsi(struct request *rq) +{ + return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; +} + +static inline bool blk_rq_is_private(struct request *rq) +{ + return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; +} + +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); +} + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -271,6 +267,8 @@ typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); typedef int (lld_busy_fn) (struct request_queue *q); typedef int (bsg_job_fn) (struct bsg_job *); +typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t); +typedef void (exit_rq_fn)(struct request_queue *, struct request *); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED, @@ -333,6 +331,7 @@ struct queue_limits { unsigned short logical_block_size; unsigned short max_segments; unsigned short max_integrity_segments; + unsigned short max_discard_segments; unsigned char misaligned; unsigned char discard_misaligned; @@ -406,8 +405,10 @@ struct request_queue { rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; + init_rq_fn *init_rq_fn; + exit_rq_fn *exit_rq_fn; - struct blk_mq_ops *mq_ops; + const struct blk_mq_ops *mq_ops; unsigned int *mq_map; @@ -432,7 +433,8 @@ struct request_queue { */ struct delayed_work delay_work; - struct backing_dev_info backing_dev_info; + struct backing_dev_info *backing_dev_info; + struct disk_devt *disk_devt; /* * The queue owner gets to use this for whatever they like. 
@@ -569,7 +571,15 @@ struct request_queue { struct list_head tag_set_list; struct bio_set *bio_split; +#ifdef CONFIG_BLK_DEBUG_FS + struct dentry *debugfs_dir; + struct dentry *mq_debugfs_dir; +#endif + bool mq_sysfs_init_done; + + size_t cmd_size; + void *rq_alloc_data; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ @@ -600,6 +610,7 @@ struct request_queue { #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_STATS 27 /* track rq completion times */ +#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -695,9 +706,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ REQ_FAILFAST_DRIVER)) -#define blk_account_rq(rq) \ - (((rq)->rq_flags & RQF_STARTED) && \ - ((rq)->cmd_type == REQ_TYPE_FS)) +static inline bool blk_account_rq(struct request *rq) +{ + return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); +} #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) @@ -772,7 +784,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync) static inline bool rq_mergeable(struct request *rq) { - if (rq->cmd_type != REQ_TYPE_FS) + if (blk_rq_is_passthrough(rq)) return false; if (req_op(rq) == REQ_OP_FLUSH) @@ -910,7 +922,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); -extern void blk_rq_set_block_pc(struct request *); extern void blk_requeue_request(struct request_queue *, struct request *); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, @@ -1047,7 +1058,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, { struct request_queue *q = rq->q; - if (unlikely(rq->cmd_type != REQ_TYPE_FS)) + if (blk_rq_is_passthrough(rq)) return q->limits.max_hw_sectors; if (!q->limits.chunk_sectors || @@ -1129,14 +1140,15 @@ extern void blk_unprep_request(struct request *); extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); -extern struct request_queue *blk_init_allocated_queue(struct request_queue *, - request_fn_proc *, spinlock_t *); +extern int blk_init_allocated_queue(struct request_queue *); extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_discard_segments(struct request_queue *, + unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); @@ -1179,8 +1191,16 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void 
blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); -extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); +/* + * Number of physical segments as sent to the device. + * + * Normally this is the number of discontiguous data segments sent by the + * submitter. But for data-less command like discard we might have no + * actual data segments submitted, but the driver might have to add it's + * own special payload. In that case we still return 1 here so that this + * special payload will be mapped. + */ static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) { if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) @@ -1188,6 +1208,15 @@ static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) return rq->nr_phys_segments; } +/* + * Number of discard segments (or ranges) the driver needs to fill in. + * Each discard bio merged into a request is counted as one segment. + */ +static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return max_t(unsigned short, rq->nr_phys_segments, 1); +} + extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); @@ -1376,6 +1405,11 @@ static inline unsigned short queue_max_segments(struct request_queue *q) return q->limits.max_segments; } +static inline unsigned short queue_max_discard_segments(struct request_queue *q) +{ + return q->limits.max_discard_segments; +} + static inline unsigned int queue_max_segment_size(struct request_queue *q) { return q->limits.max_segment_size; @@ -1620,6 +1654,25 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, return __bvec_gap_to_prev(q, bprv, offset); } +/* + * Check if the two bvecs from two bios can be merged to one segment. + * If yes, no need to check gap between the two bios since the 1st bio + * and the 1st bvec in the 2nd bio can be handled in one segment. 
+ */ +static inline bool bios_segs_mergeable(struct request_queue *q, + struct bio *prev, struct bio_vec *prev_last_bv, + struct bio_vec *next_first_bv) +{ + if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) + return false; + if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) + return false; + if (prev->bi_seg_back_size + next_first_bv->bv_len > + queue_max_segment_size(q)) + return false; + return true; +} + static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, struct bio *next) { @@ -1629,7 +1682,8 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, bio_get_last_bvec(prev, &pb); bio_get_first_bvec(next, &nb); - return __bvec_gap_to_prev(q, &pb, nb.bv_offset); + if (!bios_segs_mergeable(q, prev, &pb, &nb)) + return __bvec_gap_to_prev(q, &pb, nb.bv_offset); } return false; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index e417f080219a..d2e908586e3d 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -30,9 +30,6 @@ struct blk_trace { extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); -extern int do_blk_trace_setup(struct request_queue *q, char *name, - dev_t dev, struct block_device *bdev, - struct blk_user_trace_setup *buts); extern __printf(2, 3) void __trace_note_message(struct blk_trace *, const char *fmt, ...); @@ -80,7 +77,6 @@ extern struct attribute_group blk_trace_attr_group; #else /* !CONFIG_BLK_DEV_IO_TRACE */ # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) # define blk_trace_shutdown(q) do { } while (0) -# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) # define blk_add_driver_data(q, rq, data, len) do {} while (0) # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) # define blk_trace_startstop(q, start) (-ENOTTY) @@ -110,16 +106,16 @@ struct compat_blk_user_trace_setup { #endif -#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) +extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); -static inline int blk_cmd_buf_len(struct request *rq) +static inline sector_t blk_rq_trace_sector(struct request *rq) { - return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; + return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); } -extern void blk_dump_cmd(char *buf, struct request *rq); -extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); - -#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ +static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) +{ + return blk_rq_is_passthrough(rq) ? 
0 : blk_rq_sectors(rq); +} #endif diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 657a718c27d2..e34dde2da0ef 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -66,9 +66,8 @@ struct bsg_job { void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len); -int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, - bsg_job_fn *job_fn, int dd_job_size); -void bsg_request_fn(struct request_queue *q); +struct request_queue *bsg_setup_queue(struct device *dev, char *name, + bsg_job_fn *job_fn, int dd_job_size); void bsg_job_put(struct bsg_job *job); int __must_check bsg_job_get(struct bsg_job *job); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 8609d577bb66..6e8f209a6dff 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -36,7 +36,7 @@ struct packet_command /* Uniform cdrom data structures for cdrom.c */ struct cdrom_device_info { - struct cdrom_device_ops *ops; /* link to device_ops */ + const struct cdrom_device_ops *ops; /* link to device_ops */ struct list_head list; /* linked list of all device_info */ struct gendisk *disk; /* matching block layer disk */ void *handle; /* driver-dependent data */ @@ -87,7 +87,6 @@ struct cdrom_device_ops { /* driver specifications */ const int capability; /* capability flags */ - int n_minors; /* number of active minor devices */ /* handle uniform packets for scsi type devices (scsi,atapi) */ int (*generic_packet) (struct cdrom_device_info *, struct packet_command *); @@ -123,6 +122,8 @@ extern int cdrom_mode_sense(struct cdrom_device_info *cdi, int page_code, int page_control); extern void init_cdrom_command(struct packet_command *cgc, void *buffer, int len, int type); +extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc); /* The SCSI spec says there could be 256 slots. */ #define CDROM_MAX_SLOTS 256 diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 014cc564d1c4..c0befcf41b58 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -80,6 +80,8 @@ static const struct file_operations __fops = { \ #if defined(CONFIG_DEBUG_FS) +struct dentry *debugfs_lookup(const char *name, struct dentry *parent); + struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -181,6 +183,12 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, * want to duplicate the design decision mistakes of procfs and devfs again. 
*/ +static inline struct dentry *debugfs_lookup(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ef7962e84444..a7e6903866fd 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); * = 2: The target wants to push back the io */ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); -typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, - union map_info *map_context); typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, struct request *rq, union map_info *map_context, @@ -163,7 +161,6 @@ struct target_type { dm_ctr_fn ctr; dm_dtr_fn dtr; dm_map_fn map; - dm_map_request_fn map_rq; dm_clone_and_map_request_fn clone_and_map_rq; dm_release_clone_request_fn release_clone_rq; dm_endio_fn end_io; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index b276e9ef0e0b..aebecc4ed088 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -9,12 +9,22 @@ struct io_cq; struct elevator_type; -typedef int (elevator_merge_fn) (struct request_queue *, struct request **, +/* + * Return values from elevator merger + */ +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **, struct bio *); typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); -typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); +typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge); typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, struct request *, struct bio *); @@ -77,6 +87,34 @@ struct elevator_ops elevator_registered_fn *elevator_registered_fn; }; +struct blk_mq_alloc_data; +struct blk_mq_hw_ctx; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); + int (*request_merge)(struct request_queue *q, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *); + void (*put_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct blk_mq_hw_ctx *, struct request *); + void (*started_request)(struct request *); + void (*requeue_request)(struct request *); + struct request *(*former_request)(struct request_queue *, struct request *); + struct request *(*next_request)(struct request_queue *, struct request *); + int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *); + void (*put_rq_priv)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq 
*); + void (*exit_icq)(struct io_cq *); +}; + #define ELV_NAME_MAX (16) struct elv_fs_entry { @@ -94,12 +132,16 @@ struct elevator_type struct kmem_cache *icq_cache; /* fields provided by elevator implementation */ - struct elevator_ops ops; + union { + struct elevator_ops sq; + struct elevator_mq_ops mq; + } ops; size_t icq_size; /* see iocontext.h */ size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; + bool uses_mq; /* managed by elevator core */ char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ @@ -123,6 +165,7 @@ struct elevator_queue struct kobject kobj; struct mutex sysfs_lock; unsigned int registered:1; + unsigned int uses_mq:1; DECLARE_HASHTABLE(hash, ELV_HASH_BITS); }; @@ -133,12 +176,15 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *); extern void elv_dispatch_add_tail(struct request_queue *, struct request *); extern void elv_add_request(struct request_queue *, struct request *, int); extern void __elv_add_request(struct request_queue *, struct request *, int); -extern int elv_merge(struct request_queue *, struct request **, struct bio *); +extern enum elv_merge elv_merge(struct request_queue *, struct request **, + struct bio *); extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); -extern void elv_merged_request(struct request_queue *, struct request *, int); +extern void elv_merged_request(struct request_queue *, struct request *, + enum elv_merge); extern void elv_bio_merged(struct request_queue *q, struct request *, struct bio *); +extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); extern void elv_requeue_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); @@ -185,13 +231,6 @@ extern void elv_rb_del(struct rb_root *, struct request *); extern struct request *elv_rb_find(struct rb_root *, sector_t); /* - * Return values from elevator merger - */ -#define ELEVATOR_NO_MERGE 0 -#define ELEVATOR_FRONT_MERGE 1 -#define ELEVATOR_BACK_MERGE 2 - -/* * Insertion selection */ #define ELEVATOR_INSERT_FRONT 1 diff --git a/include/linux/fs.h b/include/linux/fs.h index 2ba074328894..c930cbc19342 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -423,6 +423,7 @@ struct block_device { int bd_invalidated; struct gendisk * bd_disk; struct request_queue * bd_queue; + struct backing_dev_info *bd_bdi; struct list_head bd_list; /* * Private data. 
You must have bd_claim'ed the block_device @@ -2342,6 +2343,7 @@ extern struct kmem_cache *names_cachep; #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); +extern void bdev_unhash_inode(dev_t dev); extern struct block_device *bdget(dev_t); extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 76f39754e7b0..a999d281a2f1 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -167,6 +167,13 @@ struct blk_integrity { }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ +struct disk_devt { + atomic_t count; + void (*release)(struct disk_devt *disk_devt); +}; + +void put_disk_devt(struct disk_devt *disk_devt); +void get_disk_devt(struct disk_devt *disk_devt); struct gendisk { /* major, first_minor and minors are input parameters only, @@ -176,6 +183,7 @@ struct gendisk { int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ + struct disk_devt *disk_devt; char disk_name[DISK_NAME_LEN]; /* name of major driver */ char *(*devnode)(struct gendisk *gd, umode_t *mode); diff --git a/include/linux/ide.h b/include/linux/ide.h index a633898f36ac..2f51c1724b5a 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -20,6 +20,7 @@ #include <linux/mutex.h> /* for request_sense */ #include <linux/cdrom.h> +#include <scsi/scsi_cmnd.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -39,18 +40,53 @@ struct device; -/* IDE-specific values for req->cmd_type */ -enum ata_cmd_type_bits { - REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1, - REQ_TYPE_ATA_PC, - REQ_TYPE_ATA_SENSE, /* sense request */ - REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */ - REQ_TYPE_ATA_PM_RESUME, /* resume request */ +/* values for ide_request.type */ +enum ata_priv_type { + ATA_PRIV_MISC, + ATA_PRIV_TASKFILE, + ATA_PRIV_PC, + ATA_PRIV_SENSE, /* sense request */ + ATA_PRIV_PM_SUSPEND, /* suspend request */ + ATA_PRIV_PM_RESUME, /* resume request */ }; -#define ata_pm_request(rq) \ - ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \ - (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME) +struct ide_request { + struct scsi_request sreq; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u8 type; +}; + +static inline struct ide_request *ide_req(struct request *rq) +{ + return blk_mq_rq_to_pdu(rq); +} + +static inline bool ata_misc_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC; +} + +static inline bool ata_taskfile_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE; +} + +static inline bool ata_pc_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC; +} + +static inline bool ata_sense_request(struct request *rq) +{ + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE; +} + +static inline bool ata_pm_request(struct request *rq) +{ + return blk_rq_is_private(rq) && + (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND || + ide_req(rq)->type == ATA_PRIV_PM_RESUME); +} /* Error codes returned in rq->errors to the higher part of the driver. 
*/ enum { @@ -579,7 +615,7 @@ struct ide_drive_s { /* current sense rq and buffer */ bool sense_rq_armed; - struct request sense_rq; + struct request *sense_rq; struct request_sense sense_data; }; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 7c273bbc5351..ca45e4a088a9 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -80,8 +80,6 @@ struct nvm_dev_ops { unsigned int max_phys_sect; }; - - #ifdef CONFIG_NVM #include <linux/blkdev.h> @@ -109,6 +107,7 @@ enum { NVM_RSP_ERR_FAILWRITE = 0x40ff, NVM_RSP_ERR_EMPTYPAGE = 0x42ff, NVM_RSP_ERR_FAILECC = 0x4281, + NVM_RSP_ERR_FAILCRC = 0x4004, NVM_RSP_WARN_HIGHECC = 0x4700, /* Device opcodes */ @@ -202,11 +201,10 @@ struct nvm_addr_format { struct nvm_id { u8 ver_id; u8 vmnt; - u8 cgrps; u32 cap; u32 dom; struct nvm_addr_format ppaf; - struct nvm_id_group groups[4]; + struct nvm_id_group grp; } __packed; struct nvm_target { @@ -216,10 +214,6 @@ struct nvm_target { struct gendisk *disk; }; -struct nvm_tgt_instance { - struct nvm_tgt_type *tt; -}; - #define ADDR_EMPTY (~0ULL) #define NVM_VERSION_MAJOR 1 @@ -230,7 +224,6 @@ struct nvm_rq; typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { - struct nvm_tgt_instance *ins; struct nvm_tgt_dev *dev; struct bio *bio; @@ -254,6 +247,8 @@ struct nvm_rq { u64 ppa_status; /* ppa media status */ int error; + + void *private; }; static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) @@ -272,15 +267,6 @@ enum { NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; -/* system block cpu representation */ -struct nvm_sb_info { - unsigned long seqnr; - unsigned long erase_cnt; - unsigned int version; - char mmtype[NVM_MMTYPE_LEN]; - struct ppa_addr fs_ppa; -}; - /* Device generic information */ struct nvm_geo { int nr_chnls; @@ -308,6 +294,7 @@ struct nvm_geo { int sec_per_lun; }; +/* sub-device structure */ struct nvm_tgt_dev { /* Device information */ struct nvm_geo geo; @@ -329,17 +316,10 @@ struct nvm_dev { struct list_head devices; - /* Media manager */ - struct nvmm_type *mt; - void *mp; - - /* System blocks */ - struct nvm_sb_info sb; - /* Device information */ struct nvm_geo geo; - /* lower page table */ + /* lower page table */ int lps_per_blk; int *lptbl; @@ -359,6 +339,10 @@ struct nvm_dev { struct mutex mlock; spinlock_t lock; + + /* target management */ + struct list_head area_list; + struct list_head targets; }; static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, @@ -391,10 +375,10 @@ static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, return l; } -static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, - struct ppa_addr r) +static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, + struct ppa_addr r) { - struct nvm_geo *geo = &dev->geo; + struct nvm_geo *geo = &tgt_dev->geo; struct ppa_addr l; l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; @@ -407,10 +391,10 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, return l; } -static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) +static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, + struct ppa_addr r) { - struct nvm_geo *geo = &dev->geo; + struct nvm_geo *geo = &tgt_dev->geo; struct ppa_addr l; l.ppa = 0; @@ -452,15 +436,12 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) (ppa1.g.blk == ppa2.g.blk)); } -static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) -{ - return dev->lptbl[slc_pg]; -} - typedef blk_qc_t 
(nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); typedef void (nvm_tgt_exit_fn)(void *); +typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); +typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); struct nvm_tgt_type { const char *name; @@ -469,12 +450,15 @@ struct nvm_tgt_type { /* target entry points */ nvm_tgt_make_rq_fn *make_rq; nvm_tgt_capacity_fn *capacity; - nvm_end_io_fn *end_io; /* module-specific init/teardown */ nvm_tgt_init_fn *init; nvm_tgt_exit_fn *exit; + /* sysfs */ + nvm_tgt_sysfs_init_fn *sysfs_init; + nvm_tgt_sysfs_exit_fn *sysfs_exit; + /* For internal use */ struct list_head list; }; @@ -487,103 +471,29 @@ extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); -typedef int (nvmm_register_fn)(struct nvm_dev *); -typedef void (nvmm_unregister_fn)(struct nvm_dev *); - -typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *); -typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *); -typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *); -typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int); -typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t); -typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t); -typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *, - struct ppa_addr, int); -typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int); - -enum { - TRANS_TGT_TO_DEV = 0x0, - TRANS_DEV_TO_TGT = 0x1, -}; - -struct nvmm_type { - const char *name; - unsigned int version[3]; - - nvmm_register_fn *register_mgr; - nvmm_unregister_fn *unregister_mgr; - - nvmm_create_tgt_fn *create_tgt; - nvmm_remove_tgt_fn *remove_tgt; - - nvmm_submit_io_fn *submit_io; - nvmm_erase_blk_fn *erase_blk; - - nvmm_get_area_fn *get_area; - nvmm_put_area_fn *put_area; - - nvmm_trans_ppa_fn *trans_ppa; - nvmm_part_to_tgt_fn *part_to_tgt; - - struct list_head list; -}; - -extern int nvm_register_mgr(struct nvmm_type *); -extern void nvm_unregister_mgr(struct nvmm_type *); - extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); -extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int); extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, int, int); extern int nvm_max_phys_sects(struct nvm_tgt_dev *); extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); -extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); -extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, const struct ppa_addr *, int, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); -extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int); extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, void *); extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); -extern void nvm_end_io(struct nvm_rq *, int); -extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, - void *, int); -extern int 
nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int, - int, void *, int); +extern void nvm_end_io(struct nvm_rq *); extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); -extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *); extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); -/* sysblk.c */ -#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ - -/* system block on disk representation */ -struct nvm_system_block { - __be32 magic; /* magic signature */ - __be32 seqnr; /* sequence number */ - __be32 erase_cnt; /* erase count */ - __be16 version; /* version number */ - u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */ - __be64 fs_ppa; /* PPA for media manager - * superblock */ -}; - -extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *); -extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); -extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); - extern int nvm_dev_factory(struct nvm_dev *, int flags); -#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \ - for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls; \ - (chid)++, (ppa).g.ch = (chid)) \ - for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \ - (lunid)++, (ppa).g.lun = (lunid)) +extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int); #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 3d1c6f1b15c9..0b676a02cf3e 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -244,6 +244,7 @@ enum { NVME_CTRL_ONCS_DSM = 1 << 2, NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_VWC_PRESENT = 1 << 0, + NVME_CTRL_OACS_SEC_SUPP = 1 << 0, }; struct nvme_lbaf { @@ -553,6 +554,8 @@ enum { NVME_DSMGMT_AD = 1 << 2, }; +#define NVME_DSM_MAX_RANGES 256 + struct nvme_dsm_range { __le32 cattr; __le32 nlb; diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index f017fd6e69c4..d4e0a204c118 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -259,6 +259,26 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) unsigned int sbitmap_weight(const struct sbitmap *sb); /** + * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. + * @sb: Bitmap to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The format may change at any time. + */ +void sbitmap_show(struct sbitmap *sb, struct seq_file *m); + +/** + * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct + * seq_file. + * @sb: Bitmap to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The output isn't guaranteed to be internally + * consistent. + */ +void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m); + +/** * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific * memory node. * @sbq: Bitmap queue to initialize. @@ -370,4 +390,14 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq, */ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); +/** + * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct + * seq_file. + * @sbq: Bitmap queue to show. + * @m: struct seq_file to write to. + * + * This is intended for debugging. The format may change at any time. 
+ */ +void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); + #endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h new file mode 100644 index 000000000000..deee23d012e7 --- /dev/null +++ b/include/linux/sed-opal.h @@ -0,0 +1,70 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Authors: + * Rafael Antognolli <rafael.antognolli@intel.com> + * Scott Bauer <scott.bauer@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef LINUX_OPAL_H +#define LINUX_OPAL_H + +#include <uapi/linux/sed-opal.h> +#include <linux/kernel.h> + +struct opal_dev; + +typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, + size_t len, bool send); + +#ifdef CONFIG_BLK_SED_OPAL +bool opal_unlock_from_suspend(struct opal_dev *dev); +struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); +int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); + +static inline bool is_sed_ioctl(unsigned int cmd) +{ + switch (cmd) { + case IOC_OPAL_SAVE: + case IOC_OPAL_LOCK_UNLOCK: + case IOC_OPAL_TAKE_OWNERSHIP: + case IOC_OPAL_ACTIVATE_LSP: + case IOC_OPAL_SET_PW: + case IOC_OPAL_ACTIVATE_USR: + case IOC_OPAL_REVERT_TPR: + case IOC_OPAL_LR_SETUP: + case IOC_OPAL_ADD_USR_TO_LR: + case IOC_OPAL_ENABLE_DISABLE_MBR: + case IOC_OPAL_ERASE_LR: + case IOC_OPAL_SECURE_ERASE_LR: + return true; + } + return false; +} +#else +static inline bool is_sed_ioctl(unsigned int cmd) +{ + return false; +} + +static inline int sed_ioctl(struct opal_dev *dev, unsigned int cmd, + void __user *ioctl_ptr) +{ + return 0; +} +static inline bool opal_unlock_from_suspend(struct opal_dev *dev) +{ + return false; +} +#define init_opal_dev(data, send_recv) NULL +#endif /* CONFIG_BLK_SED_OPAL */ +#endif /* LINUX_OPAL_H */ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 9fc1aecfc813..b379f93a2c48 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -8,6 +8,7 @@ #include <linux/timer.h> #include <linux/scatterlist.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_request.h> struct Scsi_Host; struct scsi_driver; @@ -57,6 +58,7 @@ struct scsi_pointer { #define SCMD_TAGGED (1 << 0) struct scsi_cmnd { + struct scsi_request req; struct scsi_device *device; struct list_head list; /* scsi_cmnd participates in queue lists */ struct list_head eh_entry; /* entry for the host eh_cmd_q */ @@ -149,7 +151,7 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd) return cmd + 1; } -/* make sure not to use it with REQ_TYPE_BLOCK_PC commands */ +/* make sure not to use it with passthrough commands */ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) { return *(struct scsi_driver **)cmd->request->rq_disk->private_data; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 36680f13270d..3cd8c3bec638 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -551,9 +551,6 @@ struct Scsi_Host { struct list_head __devices; struct list_head __targets; - struct scsi_host_cmd_pool *cmd_pool; - spinlock_t free_list_lock; - struct list_head 
free_list; /* backup store of cmd structs */ struct list_head starved_list; spinlock_t default_lock; @@ -826,8 +823,6 @@ extern void scsi_block_requests(struct Scsi_Host *); struct class_container; -extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, - void (*) (struct request_queue *)); /* * These two functions are used to allocate and free a pseudo device * which will connect to the host adapter itself rather than any diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h new file mode 100644 index 000000000000..ba0aeb980f7e --- /dev/null +++ b/include/scsi/scsi_request.h @@ -0,0 +1,30 @@ +#ifndef _SCSI_SCSI_REQUEST_H +#define _SCSI_SCSI_REQUEST_H + +#include <linux/blk-mq.h> + +#define BLK_MAX_CDB 16 + +struct scsi_request { + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; + unsigned short cmd_len; + unsigned int sense_len; + unsigned int resid_len; /* residual count */ + void *sense; +}; + +static inline struct scsi_request *scsi_req(struct request *rq) +{ + return blk_mq_rq_to_pdu(rq); +} + +static inline void scsi_req_free_cmd(struct scsi_request *req) +{ + if (req->cmd != req->__cmd) + kfree(req->cmd); +} + +void scsi_req_init(struct request *); + +#endif /* _SCSI_SCSI_REQUEST_H */ diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 81292392adbc..b6e07b56d013 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -119,4 +119,6 @@ scsi_transport_device_data(struct scsi_device *sdev) + shost->transportt->device_private_offset; } +void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q); + #endif /* SCSI_TRANSPORT_H */ diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 3e02e3a25413..a88ed13446ff 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -73,19 +73,17 @@ DECLARE_EVENT_CLASS(block_rq_with_error, __field( unsigned int, nr_sector ) __field( int, errors ) __array( char, rwbs, RWBS_LEN ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? - 0 : blk_rq_pos(rq); - __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? - 0 : blk_rq_sectors(rq); + __entry->sector = blk_rq_trace_sector(rq); + __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->errors = rq->errors; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); - blk_dump_cmd(__get_str(cmd), rq); + __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u [%d]", @@ -153,7 +151,7 @@ TRACE_EVENT(block_rq_complete, __field( unsigned int, nr_sector ) __field( int, errors ) __array( char, rwbs, RWBS_LEN ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( @@ -163,7 +161,7 @@ TRACE_EVENT(block_rq_complete, __entry->errors = rq->errors; blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); - blk_dump_cmd(__get_str(cmd), rq); + __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u [%d]", @@ -186,20 +184,17 @@ DECLARE_EVENT_CLASS(block_rq, __field( unsigned int, bytes ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) + __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
- 0 : blk_rq_pos(rq); - __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? - 0 : blk_rq_sectors(rq); - __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? - blk_rq_bytes(rq) : 0; + __entry->sector = blk_rq_trace_sector(rq); + __entry->nr_sector = blk_rq_trace_nr_sectors(rq); + __entry->bytes = blk_rq_bytes(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); - blk_dump_cmd(__get_str(cmd), rq); + __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index 774a43128a7a..fd19f36b3129 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -122,6 +122,44 @@ struct nvm_ioctl_dev_factory { __u32 flags; }; +struct nvm_user_vio { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nppas; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 ppa_list; + __u32 metadata_len; + __u32 data_len; + __u64 status; + __u32 result; + __u32 rsvd3[3]; +}; + +struct nvm_passthru_vio { + __u8 opcode; + __u8 flags; + __u8 rsvd[2]; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u64 ppa_list; + __u16 nppas; + __u16 control; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u64 status; + __u32 result; + __u32 timeout_ms; +}; + /* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ enum { /* top level cmds */ @@ -137,6 +175,11 @@ enum { /* Factory reset device */ NVM_DEV_FACTORY_CMD, + + /* Vector user I/O */ + NVM_DEV_VIO_ADMIN_CMD = 0x41, + NVM_DEV_VIO_CMD = 0x42, + NVM_DEV_VIO_USER_CMD = 0x43, }; #define NVM_IOCTL 'L' /* 0x4c */ @@ -154,6 +197,13 @@ enum { #define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \ struct nvm_ioctl_dev_factory) +#define NVME_NVM_IOCTL_IO_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_USER_CMD, \ + struct nvm_passthru_vio) +#define NVME_NVM_IOCTL_ADMIN_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_ADMIN_CMD,\ + struct nvm_passthru_vio) +#define NVME_NVM_IOCTL_SUBMIT_VIO _IOWR(NVM_IOCTL, NVM_DEV_VIO_CMD,\ + struct nvm_user_vio) + #define NVM_VERSION_MAJOR 1 #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCHLEVEL 0 diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h new file mode 100644 index 000000000000..c72e0735532d --- /dev/null +++ b/include/uapi/linux/sed-opal.h @@ -0,0 +1,119 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Authors: + * Rafael Antognolli <rafael.antognolli@intel.com> + * Scott Bauer <scott.bauer@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _UAPI_SED_OPAL_H +#define _UAPI_SED_OPAL_H + +#include <linux/types.h> + +#define OPAL_KEY_MAX 256 +#define OPAL_MAX_LRS 9 + +enum opal_mbr { + OPAL_MBR_ENABLE = 0x0, + OPAL_MBR_DISABLE = 0x01, +}; + +enum opal_user { + OPAL_ADMIN1 = 0x0, + OPAL_USER1 = 0x01, + OPAL_USER2 = 0x02, + OPAL_USER3 = 0x03, + OPAL_USER4 = 0x04, + OPAL_USER5 = 0x05, + OPAL_USER6 = 0x06, + OPAL_USER7 = 0x07, + OPAL_USER8 = 0x08, + OPAL_USER9 = 0x09, +}; + +enum opal_lock_state { + OPAL_RO = 0x01, /* 0001 */ + OPAL_RW = 0x02, /* 0010 */ + OPAL_LK = 0x04, /* 0100 */ +}; + +struct opal_key { + __u8 lr; + __u8 key_len; + __u8 __align[6]; + __u8 key[OPAL_KEY_MAX]; +}; + +struct opal_lr_act { + struct opal_key key; + __u32 sum; + __u8 num_lrs; + __u8 lr[OPAL_MAX_LRS]; + __u8 align[2]; /* Align to 8 byte boundary */ +}; + +struct opal_session_info { + __u32 sum; + __u32 who; + struct opal_key opal_key; +}; + +struct opal_user_lr_setup { + __u64 range_start; + __u64 range_length; + __u32 RLE; /* Read Lock enabled */ + __u32 WLE; /* Write Lock Enabled */ + struct opal_session_info session; +}; + +struct opal_lock_unlock { + struct opal_session_info session; + __u32 l_state; + __u8 __align[4]; +}; + +struct opal_new_pw { + struct opal_session_info session; + + /* When we're not operating in sum, and we first set + * passwords we need to set them via ADMIN authority. + * After passwords are changed, we can set them via, + * User authorities. + * Because of this restriction we need to know about + * Two different users. One in 'session' which we will use + * to start the session and new_userr_pw as the user we're + * chaning the pw for. + */ + struct opal_session_info new_user_pw; +}; + +struct opal_mbr_data { + struct opal_key key; + __u8 enable_disable; + __u8 __align[7]; +}; + +#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock) +#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock) +#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key) +#define IOC_OPAL_ACTIVATE_LSP _IOW('p', 223, struct opal_lr_act) +#define IOC_OPAL_SET_PW _IOW('p', 224, struct opal_new_pw) +#define IOC_OPAL_ACTIVATE_USR _IOW('p', 225, struct opal_session_info) +#define IOC_OPAL_REVERT_TPR _IOW('p', 226, struct opal_key) +#define IOC_OPAL_LR_SETUP _IOW('p', 227, struct opal_user_lr_setup) +#define IOC_OPAL_ADD_USR_TO_LR _IOW('p', 228, struct opal_lock_unlock) +#define IOC_OPAL_ENABLE_DISABLE_MBR _IOW('p', 229, struct opal_mbr_data) +#define IOC_OPAL_ERASE_LR _IOW('p', 230, struct opal_session_info) +#define IOC_OPAL_SECURE_ERASE_LR _IOW('p', 231, struct opal_session_info) + +#endif /* _UAPI_SED_OPAL_H */ |
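
For the new SED/OPAL user interface above, a minimal userspace sketch of
driving IOC_OPAL_LOCK_UNLOCK against an OPAL-capable drive follows. The
device node and password are placeholders, and the underlying driver must
implement Security Send/Receive (see the first commit in the list) for the
ioctl to get anywhere.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sed-opal.h>

int main(void)
{
	/* Placeholder device node; any OPAL-capable block device works. */
	int fd = open("/dev/nvme0n1", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct opal_lock_unlock lk = { 0 };
	const char *pw = "password";		/* placeholder credentials */

	lk.session.who = OPAL_ADMIN1;
	lk.session.opal_key.lr = 0;		/* global locking range */
	lk.session.opal_key.key_len = strlen(pw);
	memcpy(lk.session.opal_key.key, pw, lk.session.opal_key.key_len);
	lk.l_state = OPAL_RW;			/* unlock for read/write */

	if (ioctl(fd, IOC_OPAL_LOCK_UNLOCK, &lk) < 0)
		perror("IOC_OPAL_LOCK_UNLOCK");

	close(fd);
	return 0;
}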