author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-12 15:39:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-12 15:39:22 -0700
commit     aabfea8dc91cf5b220d2ed85e8f6395a9b140371 (patch)
tree       00172545b50105dbbc6a1d4fceecd415ddb7e9bf
parent     7181feb9b7837a1e107123cc16552d42d922def6 (diff)
parent     9a159190414d461fdac7ae5bb749c2d532b35419 (diff)
Merge tag 's390-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Vasily Gorbik:
- Fix integer overflow during stack frame unwind with invalid backchain
  (an illustrative sketch of the overflow-safe check follows this list).
- Cleanup unused symbol export in zcrypt code.
- Fix MIO addressing control activation in PCI code and expose its
usage via sysfs.
- Fix kernel image signature verification report presence detection.
- Fix irq registration in vfio-ap code.
- Add CPU measurement counters for newer machines.
- Add base DASD thin provisioning support and code cleanups.
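The first item above, the unwind fix, rearranges the stack bounds check so that no addition is performed on the untrusted stack pointer, and therefore nothing can wrap around when the backchain is invalid. Below is a stand-alone C sketch of that pattern only; FRAME_SIZE, the argument names, and main() are illustrative stand-ins and are not taken from the kernel sources.

#include <stdbool.h>
#include <stdio.h>

#define FRAME_SIZE 160UL        /* stand-in for sizeof(struct stack_frame) */

/*
 * Overflow-safe bounds check: equivalent to "sp + FRAME_SIZE > stack_end"
 * as long as stack_end >= FRAME_SIZE, but it cannot wrap around when sp
 * is a bogus value read from an invalid backchain.
 */
static bool outside_of_stack(unsigned long sp, unsigned long prev_sp,
                             unsigned long stack_end)
{
        return sp <= prev_sp || sp > stack_end - FRAME_SIZE;
}

int main(void)
{
        unsigned long bogus_sp = ~0UL - 8;      /* near ULONG_MAX */

        /*
         * "bogus_sp + FRAME_SIZE" would wrap to a small value and slip
         * past the naive comparison; the rearranged check rejects it
         * and this prints 1.
         */
        printf("%d\n", outside_of_stack(bogus_sp, 0x1000, 0x2000));
        return 0;
}

The same rearrangement appears in the arch/s390/kernel/unwind_bc.c hunk further down, where the addition on sp is replaced by a subtraction from the known-good stack_info.end.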
* tag 's390-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (21 commits)
s390/unwind: avoid int overflow in outside_of_stack
s390/zcrypt: remove the exporting of ap_query_configuration
s390/pci: add mio_enabled attribute
s390: fix setting of mio addressing control
s390/ipl: Fix detection of has_secure attribute
s390: vfio-ap: fix irq registration
s390/cpumf: Add extended counter set definitions for model 8561 and 8562
s390/dasd: Handle out-of-space constraint
s390/dasd: Add discard support for ESE volumes
s390/dasd: Use ALIGN_DOWN macro
s390/dasd: Make dasd_setup_queue() a discipline function
s390/dasd: Add new ioctl to release space
s390/dasd: Add dasd_sleep_on_queue_interruptible()
s390/dasd: Add missing intensity definition
s390/dasd: Fix whitespace
s390/dasd: Add dynamic formatting support for ESE volumes
s390/dasd: Recognise data for ESE volumes
s390/dasd: Put sub-order definitions in a separate section
s390/dasd: Make layout analysis ESE compatible
s390/dasd: Remove old defines and function
...
-rw-r--r--  arch/s390/include/asm/pci_insn.h          |  10
-rw-r--r--  arch/s390/include/asm/sclp.h              |   1
-rw-r--r--  arch/s390/include/uapi/asm/dasd.h         | 154
-rw-r--r--  arch/s390/kernel/early.c                  |   2
-rw-r--r--  arch/s390/kernel/ipl.c                    |   7
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c    |   2
-rw-r--r--  arch/s390/kernel/unwind_bc.c              |   2
-rw-r--r--  arch/s390/pci/pci.c                       |   4
-rw-r--r--  arch/s390/pci/pci_sysfs.c                 |  10
-rw-r--r--  drivers/s390/block/dasd.c                 | 233
-rw-r--r--  drivers/s390/block/dasd_devmap.c          |  70
-rw-r--r--  drivers/s390/block/dasd_diag.c            |  22
-rw-r--r--  drivers/s390/block/dasd_eckd.c            | 966
-rw-r--r--  drivers/s390/block/dasd_eckd.h            | 175
-rw-r--r--  drivers/s390/block/dasd_eer.c             |   1
-rw-r--r--  drivers/s390/block/dasd_fba.c             |  45
-rw-r--r--  drivers/s390/block/dasd_fba.h             |   5
-rw-r--r--  drivers/s390/block/dasd_int.h             |  33
-rw-r--r--  drivers/s390/block/dasd_ioctl.c           |  56
-rw-r--r--  drivers/s390/char/sclp_early.c            |   1
-rw-r--r--  drivers/s390/crypto/ap_bus.c              |   1
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c         |   3
22 files changed, 1547 insertions, 256 deletions
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index ff81ed19c506..61cf9531f68f 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -143,14 +143,4 @@ static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc) return __zpci_set_irq_ctrl(ctl, isc, &iib); } -#ifdef CONFIG_PCI -static inline void enable_mio_ctl(void) -{ - if (static_branch_likely(&have_mio)) - __ctl_set_bit(2, 5); -} -#else /* CONFIG_PCI */ -static inline void enable_mio_ctl(void) {} -#endif /* CONFIG_PCI */ - #endif diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index f577c5f6031a..c563f8368b19 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -80,7 +80,6 @@ struct sclp_info { unsigned char has_gisaf : 1; unsigned char has_diag318 : 1; unsigned char has_sipl : 1; - unsigned char has_sipl_g2 : 1; unsigned char has_dirq : 1; unsigned int ibc; unsigned int mtid; diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h index 832be5c2584f..9ec86fae9980 100644 --- a/arch/s390/include/uapi/asm/dasd.h +++ b/arch/s390/include/uapi/asm/dasd.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* +/* * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 1999, 2000 @@ -21,40 +21,40 @@ #define DASD_API_VERSION 6 -/* +/* * struct dasd_information2_t * represents any data about the device, which is visible to userspace. * including foramt and featueres. */ typedef struct dasd_information2_t { - unsigned int devno; /* S/390 devno */ - unsigned int real_devno; /* for aliases */ - unsigned int schid; /* S/390 subchannel identifier */ - unsigned int cu_type : 16; /* from SenseID */ - unsigned int cu_model : 8; /* from SenseID */ - unsigned int dev_type : 16; /* from SenseID */ - unsigned int dev_model : 8; /* from SenseID */ - unsigned int open_count; - unsigned int req_queue_len; - unsigned int chanq_len; /* length of chanq */ - char type[4]; /* from discipline.name, 'none' for unknown */ - unsigned int status; /* current device level */ - unsigned int label_block; /* where to find the VOLSER */ - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ - unsigned int characteristics_size; - unsigned int confdata_size; - char characteristics[64]; /* from read_device_characteristics */ - char configuration_data[256]; /* from read_configuration_data */ - unsigned int format; /* format info like formatted/cdl/ldl/... */ - unsigned int features; /* dasd features like 'ro',... */ - unsigned int reserved0; /* reserved for further use ,... */ - unsigned int reserved1; /* reserved for further use ,... */ - unsigned int reserved2; /* reserved for further use ,... */ - unsigned int reserved3; /* reserved for further use ,... */ - unsigned int reserved4; /* reserved for further use ,... */ - unsigned int reserved5; /* reserved for further use ,... */ - unsigned int reserved6; /* reserved for further use ,... */ - unsigned int reserved7; /* reserved for further use ,... 
*/ + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ + unsigned int format; /* format info like formatted/cdl/ldl/... */ + unsigned int features; /* dasd features like 'ro',... */ + unsigned int reserved0; /* reserved for further use ,... */ + unsigned int reserved1; /* reserved for further use ,... */ + unsigned int reserved2; /* reserved for further use ,... */ + unsigned int reserved3; /* reserved for further use ,... */ + unsigned int reserved4; /* reserved for further use ,... */ + unsigned int reserved5; /* reserved for further use ,... */ + unsigned int reserved6; /* reserved for further use ,... */ + unsigned int reserved7; /* reserved for further use ,... */ } dasd_information2_t; /* @@ -92,34 +92,34 @@ typedef struct dasd_information2_t { #define DASD_PARTN_BITS 2 -/* +/* * struct dasd_information_t * represents any data about the data, which is visible to userspace */ typedef struct dasd_information_t { - unsigned int devno; /* S/390 devno */ - unsigned int real_devno; /* for aliases */ - unsigned int schid; /* S/390 subchannel identifier */ - unsigned int cu_type : 16; /* from SenseID */ - unsigned int cu_model : 8; /* from SenseID */ - unsigned int dev_type : 16; /* from SenseID */ - unsigned int dev_model : 8; /* from SenseID */ - unsigned int open_count; - unsigned int req_queue_len; - unsigned int chanq_len; /* length of chanq */ - char type[4]; /* from discipline.name, 'none' for unknown */ - unsigned int status; /* current device level */ - unsigned int label_block; /* where to find the VOLSER */ - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ - unsigned int characteristics_size; - unsigned int confdata_size; - char characteristics[64]; /* from read_device_characteristics */ - char configuration_data[256]; /* from read_configuration_data */ + unsigned int devno; /* S/390 devno */ + unsigned int real_devno; /* for aliases */ + unsigned int schid; /* S/390 subchannel identifier */ + unsigned int cu_type : 16; /* from SenseID */ + unsigned int cu_model : 8; /* from SenseID */ + unsigned int dev_type : 16; /* from SenseID */ + unsigned int dev_model : 8; /* from SenseID */ + unsigned int open_count; + unsigned int req_queue_len; + unsigned int chanq_len; /* length of chanq */ + char type[4]; /* from discipline.name, 'none' for unknown */ + unsigned int status; /* current device level */ + unsigned int label_block; /* where to find the VOLSER */ + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ + unsigned int characteristics_size; + unsigned int confdata_size; + char characteristics[64]; /* from read_device_characteristics */ + char configuration_data[256]; /* from read_configuration_data */ } dasd_information_t; /* * Read Subsystem 
Data - Performance Statistics - */ + */ typedef struct dasd_rssd_perf_stats_t { unsigned char invalid:1; unsigned char format:3; @@ -154,21 +154,21 @@ typedef struct dasd_rssd_perf_stats_t { unsigned char reseved2[96]; } __attribute__((packed)) dasd_rssd_perf_stats_t; -/* +/* * struct profile_info_t - * holds the profinling information + * holds the profinling information */ typedef struct dasd_profile_info_t { - unsigned int dasd_io_reqs; /* number of requests processed at all */ - unsigned int dasd_io_sects; /* number of sectors processed at all */ - unsigned int dasd_io_secs[32]; /* histogram of request's sizes */ - unsigned int dasd_io_times[32]; /* histogram of requests's times */ - unsigned int dasd_io_timps[32]; /* histogram of requests's times per sector */ - unsigned int dasd_io_time1[32]; /* histogram of time from build to start */ - unsigned int dasd_io_time2[32]; /* histogram of time from start to irq */ - unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq */ - unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */ - unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */ + unsigned int dasd_io_reqs; /* number of requests processed at all */ + unsigned int dasd_io_sects; /* number of sectors processed at all */ + unsigned int dasd_io_secs[32]; /* histogram of request's sizes */ + unsigned int dasd_io_times[32]; /* histogram of requests's times */ + unsigned int dasd_io_timps[32]; /* histogram of requests's times per sector */ + unsigned int dasd_io_time1[32]; /* histogram of time from build to start */ + unsigned int dasd_io_time2[32]; /* histogram of time from start to irq */ + unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq */ + unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */ + unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */ } dasd_profile_info_t; /* @@ -189,10 +189,12 @@ typedef struct format_data_t { * 3/11: also write home address * 4/12: invalidate track */ -#define DASD_FMT_INT_FMT_R0 1 /* write record zero */ -#define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */ -#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ -#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ +#define DASD_FMT_INT_FMT_R0 1 /* write record zero */ +#define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */ +#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ +#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ +#define DASD_FMT_INT_FMT_NOR0 16 /* remove permission to write record zero */ +#define DASD_FMT_INT_ESE_FULL 32 /* release space for entire volume */ /* * struct format_check_t @@ -225,7 +227,7 @@ typedef struct format_check_t { /* If key-length was != 0 */ #define DASD_FMT_ERR_KEY_LENGTH 5 -/* +/* * struct attrib_data_t * represents the operation (cache) bits for the device. * Used in DE to influence caching of the DASD. 
@@ -281,13 +283,13 @@ struct dasd_snid_ioctl_data { * Here ist how the ioctl-nr should be used: * 0 - 31 DASD driver itself * 32 - 239 still open - * 240 - 255 reserved for EMC + * 240 - 255 reserved for EMC *******************************************************************************/ /* Disable the volume (for Linux) */ -#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) +#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) /* Enable the volume (for Linux) */ -#define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1) +#define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1) /* Issue a reserve/release command, rsp. */ #define BIODASDRSRV _IO(DASD_IOCTL_LETTER,2) /* reserve */ #define BIODASDRLSE _IO(DASD_IOCTL_LETTER,3) /* release */ @@ -295,9 +297,9 @@ struct dasd_snid_ioctl_data { /* reset profiling information of a device */ #define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5) /* Quiesce IO on device */ -#define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) +#define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) /* Resume IO on device */ -#define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7) +#define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7) /* Abort all I/O on a device */ #define BIODASDABORTIO _IO(DASD_IOCTL_LETTER, 240) /* Allow I/O on a device */ @@ -315,13 +317,15 @@ struct dasd_snid_ioctl_data { /* Performance Statistics Read */ #define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) /* Get Attributes (cache operations) */ -#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) +#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) /* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ -#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) +#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) /* Set Attributes (cache operations) */ -#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) +#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) +/* Release Allocated Space */ +#define BIODASDRAS _IOW(DASD_IOCTL_LETTER, 3, format_data_t) /* Get Sense Path Group ID (SNID) data */ #define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 629f173f60cd..6312fed48530 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -30,7 +30,6 @@ #include <asm/sclp.h> #include <asm/facility.h> #include <asm/boot_data.h> -#include <asm/pci_insn.h> #include "entry.h" /* @@ -236,7 +235,6 @@ static __init void detect_machine_facilities(void) clock_comparator_max = -1ULL >> 1; __ctl_set_bit(0, 53); } - enable_mio_ctl(); } static inline void save_vector_registers(void) diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index d836af3ccc38..2c0a515428d6 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -286,12 +286,7 @@ static struct kobj_attribute sys_ipl_secure_attr = static ssize_t ipl_has_secure_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - if (MACHINE_IS_LPAR) - return sprintf(page, "%i\n", !!sclp.has_sipl); - else if (MACHINE_IS_VM) - return sprintf(page, "%i\n", !!sclp.has_sipl_g2); - else - return sprintf(page, "%i\n", 0); + return sprintf(page, "%i\n", !!sclp.has_sipl); } static struct kobj_attribute sys_ipl_has_secure_attr = diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 34cc96449b30..8b33e03e47b8 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -624,6 +624,8 @@ __init const struct attribute_group **cpumf_cf_event_group(void) 
break; case 0x3906: case 0x3907: + case 0x8561: + case 0x8562: model = cpumcf_z14_pmu_event_attr; break; default: diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c index 3ce8a0808059..8fc9daae47a2 100644 --- a/arch/s390/kernel/unwind_bc.c +++ b/arch/s390/kernel/unwind_bc.c @@ -20,7 +20,7 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address); static bool outside_of_stack(struct unwind_state *state, unsigned long sp) { return (sp <= state->sp) || - (sp + sizeof(struct stack_frame) > state->stack_info.end); + (sp > state->stack_info.end - sizeof(struct stack_frame)); } static bool update_stack_info(struct unwind_state *state, unsigned long sp) diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b8a64cbb5dea..b0e3b9a0e488 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -890,8 +890,10 @@ static int __init pci_base_init(void) if (!test_facility(69) || !test_facility(71)) return 0; - if (test_facility(153) && !s390_pci_no_mio) + if (test_facility(153) && !s390_pci_no_mio) { static_branch_enable(&have_mio); + ctl_set_bit(2, 5); + } rc = zpci_debug_init(); if (rc) diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index 430c14b006d1..a433ba01a317 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -37,6 +37,15 @@ zpci_attr(segment1, "0x%02x\n", pfip[1]); zpci_attr(segment2, "0x%02x\n", pfip[2]); zpci_attr(segment3, "0x%02x\n", pfip[3]); +static ssize_t mio_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); + + return sprintf(buf, zpci_use_mio(zdev) ? "1\n" : "0\n"); +} +static DEVICE_ATTR_RO(mio_enabled); + static ssize_t recover_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -115,6 +124,7 @@ static struct attribute *zpci_dev_attrs[] = { &dev_attr_vfn.attr, &dev_attr_uid.attr, &dev_attr_recover.attr, + &dev_attr_mio_enabled.attr, NULL, }; static struct attribute_group zpci_attr_group = { diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e03304fe25bb..6cca72782af6 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -70,7 +70,6 @@ MODULE_LICENSE("GPL"); * SECTION: prototypes for static functions of dasd.c */ static int dasd_alloc_queue(struct dasd_block *); -static void dasd_setup_queue(struct dasd_block *); static void dasd_free_queue(struct dasd_block *); static int dasd_flush_block_queue(struct dasd_block *); static void dasd_device_tasklet(unsigned long); @@ -120,9 +119,18 @@ struct dasd_device *dasd_alloc_device(void) kfree(device); return ERR_PTR(-ENOMEM); } + /* Get two pages for ese format. 
*/ + device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); + if (!device->ese_mem) { + free_page((unsigned long) device->erp_mem); + free_pages((unsigned long) device->ccw_mem, 1); + kfree(device); + return ERR_PTR(-ENOMEM); + } dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); + dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); spin_lock_init(&device->mem_lock); atomic_set(&device->tasklet_scheduled, 0); tasklet_init(&device->tasklet, dasd_device_tasklet, @@ -146,6 +154,7 @@ struct dasd_device *dasd_alloc_device(void) void dasd_free_device(struct dasd_device *device) { kfree(device->private); + free_pages((unsigned long) device->ese_mem, 1); free_page((unsigned long) device->erp_mem); free_pages((unsigned long) device->ccw_mem, 1); kfree(device); @@ -348,7 +357,8 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) } return rc; } - dasd_setup_queue(block); + if (device->discipline->setup_blk_queue) + device->discipline->setup_blk_queue(block); set_capacity(block->gdp, block->blocks << block->s2b_shift); device->state = DASD_STATE_READY; @@ -1258,6 +1268,49 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, } EXPORT_SYMBOL(dasd_smalloc_request); +struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength, + int datasize, + struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + unsigned long flags; + int size, cqr_size; + char *data; + + cqr_size = (sizeof(*cqr) + 7L) & -8L; + size = cqr_size; + if (cplength > 0) + size += cplength * sizeof(struct ccw1); + if (datasize > 0) + size += datasize; + + spin_lock_irqsave(&device->mem_lock, flags); + cqr = dasd_alloc_chunk(&device->ese_chunks, size); + spin_unlock_irqrestore(&device->mem_lock, flags); + if (!cqr) + return ERR_PTR(-ENOMEM); + memset(cqr, 0, sizeof(*cqr)); + data = (char *)cqr + cqr_size; + cqr->cpaddr = NULL; + if (cplength > 0) { + cqr->cpaddr = data; + data += cplength * sizeof(struct ccw1); + memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); + } + cqr->data = NULL; + if (datasize > 0) { + cqr->data = data; + memset(cqr->data, 0, datasize); + } + + cqr->magic = magic; + set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); + dasd_get_device(device); + + return cqr; +} +EXPORT_SYMBOL(dasd_fmalloc_request); + void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { unsigned long flags; @@ -1269,6 +1322,17 @@ void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) } EXPORT_SYMBOL(dasd_sfree_request); +void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) +{ + unsigned long flags; + + spin_lock_irqsave(&device->mem_lock, flags); + dasd_free_chunk(&device->ese_chunks, cqr); + spin_unlock_irqrestore(&device->mem_lock, flags); + dasd_put_device(device); +} +EXPORT_SYMBOL(dasd_ffree_request); + /* * Check discipline magic in cqr. 
*/ @@ -1573,13 +1637,43 @@ static int dasd_check_hpf_error(struct irb *irb) irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); } +static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) +{ + struct dasd_device *device = NULL; + u8 *sense = NULL; + + if (!block) + return 0; + device = block->base; + if (!device || !device->discipline->is_ese) + return 0; + if (!device->discipline->is_ese(device)) + return 0; + + sense = dasd_get_sense(irb); + if (!sense) + return 0; + + return !!(sense[1] & SNS1_NO_REC_FOUND) || + !!(sense[1] & SNS1_FILE_PROTECTED) || + scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; +} + +static int dasd_ese_oos_cond(u8 *sense) +{ + return sense[0] & SNS0_EQUIPMENT_CHECK && + sense[1] & SNS1_PERM_ERR && + sense[1] & SNS1_WRITE_INHIBITED && + sense[25] == 0x01; +} + /* * Interrupt handler for "normal" ssch-io based dasd devices. */ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { - struct dasd_ccw_req *cqr, *next; + struct dasd_ccw_req *cqr, *next, *fcqr; struct dasd_device *device; unsigned long now; int nrf_suppressed = 0; @@ -1641,6 +1735,17 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + + /* + * Extent pool probably out-of-space. + * Stop device and check exhaust level. + */ + if (dasd_ese_oos_cond(sense)) { + dasd_generic_space_exhaust(device, cqr); + device->discipline->ext_pool_exhaust(device, cqr); + dasd_put_device(device); + return; + } } if (!(fp_suppressed || nrf_suppressed)) device->discipline->dump_sense_dbf(device, irb, "int"); @@ -1672,6 +1777,31 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } + if (dasd_ese_needs_format(cqr->block, irb)) { + if (rq_data_dir((struct request *)cqr->callback_data) == READ) { + device->discipline->ese_read(cqr); + cqr->status = DASD_CQR_SUCCESS; + cqr->stopclk = now; + dasd_device_clear_timer(device); + dasd_schedule_device_bh(device); + return; + } + fcqr = device->discipline->ese_format(device, cqr); + if (IS_ERR(fcqr)) { + /* + * If we can't format now, let the request go + * one extra round. Maybe we can format later. + */ + cqr->status = DASD_CQR_QUEUED; + } else { + fcqr->status = DASD_CQR_QUEUED; + cqr->status = DASD_CQR_QUEUED; + list_add(&fcqr->devlist, &device->ccw_queue); + dasd_schedule_device_bh(device); + return; + } + } + /* Check for clear pending */ if (cqr->status == DASD_CQR_CLEAR_PENDING && scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { @@ -1910,7 +2040,7 @@ static void __dasd_device_check_expire(struct dasd_device *device) static int __dasd_device_is_unusable(struct dasd_device *device, struct dasd_ccw_req *cqr) { - int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); + int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { @@ -2412,6 +2542,15 @@ int dasd_sleep_on_queue(struct list_head *ccw_queue) EXPORT_SYMBOL(dasd_sleep_on_queue); /* + * Start requests from a ccw_queue and wait interruptible for their completion. + */ +int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) +{ + return _dasd_sleep_on_queue(ccw_queue, 1); +} +EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); + +/* * Queue a request to the tail of the device ccw_queue and wait * interruptible for it's completion. 
*/ @@ -3130,55 +3269,6 @@ static int dasd_alloc_queue(struct dasd_block *block) } /* - * Allocate and initialize request queue. - */ -static void dasd_setup_queue(struct dasd_block *block) -{ - unsigned int logical_block_size = block->bp_block; - struct request_queue *q = block->request_queue; - unsigned int max_bytes, max_discard_sectors; - int max; - - if (block->base->features & DASD_FEATURE_USERAW) { - /* - * the max_blocks value for raw_track access is 256 - * it is higher than the native ECKD value because we - * only need one ccw per track - * so the max_hw_sectors are - * 2048 x 512B = 1024kB = 16 tracks - */ - max = 2048; - } else { - max = block->base->discipline->max_blocks << block->s2b_shift; - } - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - q->limits.max_dev_sectors = max; - blk_queue_logical_block_size(q, logical_block_size); - blk_queue_max_hw_sectors(q, max); - blk_queue_max_segments(q, USHRT_MAX); - /* with page sized segments we can translate each segement into - * one idaw/tidaw - */ - blk_queue_max_segment_size(q, PAGE_SIZE); - blk_queue_segment_boundary(q, PAGE_SIZE - 1); - - /* Only activate blocklayer discard support for devices that support it */ - if (block->base->features & DASD_FEATURE_DISCARD) { - q->limits.discard_granularity = logical_block_size; - q->limits.discard_alignment = PAGE_SIZE; - - /* Calculate max_discard_sectors and make it PAGE aligned */ - max_bytes = USHRT_MAX * logical_block_size; - max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE; - max_discard_sectors = max_bytes / logical_block_size; - - blk_queue_max_discard_sectors(q, max_discard_sectors); - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - } -} - -/* * Deactivate and free request queue. 
*/ static void dasd_free_queue(struct dasd_block *block) @@ -3806,6 +3896,43 @@ int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) } EXPORT_SYMBOL_GPL(dasd_generic_verify_path); +void dasd_generic_space_exhaust(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + dasd_eer_write(device, NULL, DASD_EER_NOSPC); + + if (device->state < DASD_STATE_BASIC) + return; + + if (cqr->status == DASD_CQR_IN_IO || + cqr->status == DASD_CQR_CLEAR_PENDING) { + cqr->status = DASD_CQR_QUEUED; + cqr->retries++; + } + dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); + dasd_device_clear_timer(device); + dasd_schedule_device_bh(device); +} +EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); + +void dasd_generic_space_avail(struct dasd_device *device) +{ + dev_info(&device->cdev->dev, "Extent pool space is available\n"); + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); + + dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); + dasd_schedule_device_bh(device); + + if (device->block) { + dasd_schedule_block_bh(device->block); + if (device->block->request_queue) + blk_mq_run_hw_queues(device->block->request_queue, true); + } + if (!device->stopped) + wake_up(&generic_waitq); +} +EXPORT_SYMBOL_GPL(dasd_generic_space_avail); + /* * clear active requests and requeue them to block layer if possible */ diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 245f33c2f71e..32fc51341d99 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1642,6 +1642,35 @@ static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show, dasd_path_interval_store); +#define DASD_DEFINE_ATTR(_name, _func) \ +static ssize_t dasd_##_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct ccw_device *cdev = to_ccwdev(dev); \ + struct dasd_device *device = dasd_device_from_cdev(cdev); \ + int val = 0; \ + \ + if (IS_ERR(device)) \ + return -ENODEV; \ + if (device->discipline && _func) \ + val = _func(device); \ + dasd_put_device(device); \ + \ + return snprintf(buf, PAGE_SIZE, "%d\n", val); \ +} \ +static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \ + +DASD_DEFINE_ATTR(ese, device->discipline->is_ese); +DASD_DEFINE_ATTR(extent_size, device->discipline->ext_size); +DASD_DEFINE_ATTR(pool_id, device->discipline->ext_pool_id); +DASD_DEFINE_ATTR(space_configured, device->discipline->space_configured); +DASD_DEFINE_ATTR(space_allocated, device->discipline->space_allocated); +DASD_DEFINE_ATTR(logical_capacity, device->discipline->logical_capacity); +DASD_DEFINE_ATTR(warn_threshold, device->discipline->ext_pool_warn_thrshld); +DASD_DEFINE_ATTR(cap_at_warnlevel, device->discipline->ext_pool_cap_at_warnlevel); +DASD_DEFINE_ATTR(pool_oos, device->discipline->ext_pool_oos); + static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, @@ -1667,6 +1696,7 @@ static struct attribute * dasd_attrs[] = { &dev_attr_path_interval.attr, &dev_attr_path_reset.attr, &dev_attr_hpf.attr, + &dev_attr_ese.attr, NULL, }; @@ -1674,6 +1704,39 @@ static const struct attribute_group dasd_attr_group = { .attrs = dasd_attrs, }; +static struct attribute *capacity_attrs[] = { + &dev_attr_space_configured.attr, + &dev_attr_space_allocated.attr, + &dev_attr_logical_capacity.attr, + NULL, +}; + +static const struct attribute_group capacity_attr_group = { + .name = "capacity", + .attrs = capacity_attrs, +}; + +static struct attribute *ext_pool_attrs[] = { + &dev_attr_pool_id.attr, + 
&dev_attr_extent_size.attr, + &dev_attr_warn_threshold.attr, + &dev_attr_cap_at_warnlevel.attr, + &dev_attr_pool_oos.attr, + NULL, +}; + +static const struct attribute_group ext_pool_attr_group = { + .name = "extent_pool", + .attrs = ext_pool_attrs, +}; + +static const struct attribute_group *dasd_attr_groups[] = { + &dasd_attr_group, + &capacity_attr_group, + &ext_pool_attr_group, + NULL, +}; + /* * Return value of the specified feature. */ @@ -1715,16 +1778,15 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag) EXPORT_SYMBOL(dasd_set_feature); -int -dasd_add_sysfs_files(struct ccw_device *cdev) +int dasd_add_sysfs_files(struct ccw_device *cdev) { - return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group); + return sysfs_create_groups(&cdev->dev.kobj, dasd_attr_groups); } void dasd_remove_sysfs_files(struct ccw_device *cdev) { - sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group); + sysfs_remove_groups(&cdev->dev.kobj, dasd_attr_groups); } diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index e1fe02477ea8..8d4971645cf1 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -615,14 +615,34 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "dump sense not available for DIAG data"); } +/* + * Initialize block layer request queue. + */ +static void dasd_diag_setup_blk_queue(struct dasd_block *block) +{ + unsigned int logical_block_size = block->bp_block; + struct request_queue *q = block->request_queue; + int max; + + max = DIAG_MAX_BLOCKS << block->s2b_shift; + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + q->limits.max_dev_sectors = max; + blk_queue_logical_block_size(q, logical_block_size); + blk_queue_max_hw_sectors(q, max); + blk_queue_max_segments(q, USHRT_MAX); + /* With page sized segments each segment can be translated into one idaw/tidaw */ + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); +} + static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", - .max_blocks = DIAG_MAX_BLOCKS, .check_device = dasd_diag_check_device, .verify_path = dasd_generic_verify_path, .fill_geometry = dasd_diag_fill_geometry, + .setup_blk_queue = dasd_diag_setup_blk_queue, .start_IO = dasd_start_diag, .term_IO = dasd_diag_term_IO, .handle_terminated_request = dasd_diag_handle_terminated_request, diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index c09039eea707..fc53e1e221f0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -42,20 +42,6 @@ #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(eckd):" -#define ECKD_C0(i) (i->home_bytes) -#define ECKD_F(i) (i->formula) -#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\ - (i->factors.f_0x02.f1)) -#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\ - (i->factors.f_0x02.f2)) -#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\ - (i->factors.f_0x02.f3)) -#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0) -#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0) -#define ECKD_F6(i) (i->factor6) -#define ECKD_F7(i) (i->factor7) -#define ECKD_F8(i) (i->factor8) - /* * raw track access always map to 64k in memory * so it maps to 16 blocks of 4k per track @@ -103,6 +89,19 @@ static struct { } *dasd_reserve_req; static DEFINE_MUTEX(dasd_reserve_mutex); +static struct { + struct dasd_ccw_req cqr; + struct ccw1 ccw[2]; + char data[40]; +} *dasd_vol_info_req; 
+static DEFINE_MUTEX(dasd_vol_info_mutex); + +struct ext_pool_exhaust_work_data { + struct work_struct worker; + struct dasd_device *device; + struct dasd_device *base; +}; + /* definitions for the path verification worker */ struct path_verification_work_data { struct work_struct worker; @@ -122,6 +121,7 @@ struct check_attention_work_data { __u8 lpum; }; +static int dasd_eckd_ext_pool_id(struct dasd_device *); static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int, struct dasd_device *, struct dasd_device *, unsigned int, int, unsigned int, unsigned int, @@ -157,17 +157,10 @@ static const int sizes_trk0[] = { 28, 148, 84 }; #define LABEL_SIZE 140 /* head and record addresses of count_area read in analysis ccw */ -static const int count_area_head[] = { 0, 0, 0, 0, 2 }; +static const int count_area_head[] = { 0, 0, 0, 0, 1 }; static const int count_area_rec[] = { 1, 2, 3, 4, 1 }; static inline unsigned int -round_up_multiple(unsigned int no, unsigned int mult) -{ - int rem = no % mult; - return (rem ? no - rem + mult : no); -} - -static inline unsigned int ceil_quot(unsigned int d1, unsigned int d2) { return (d1 + (d2 - 1)) / d2; @@ -1491,6 +1484,311 @@ static int dasd_eckd_read_features(struct dasd_device *device) return rc; } +/* Read Volume Information - Volume Storage Query */ +static int dasd_eckd_read_vol_info(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + struct dasd_psf_prssd_data *prssdp; + struct dasd_rssd_vsq *vsq; + struct dasd_ccw_req *cqr; + struct ccw1 *ccw; + int useglobal; + int rc; + + /* This command cannot be executed on an alias device */ + if (private->uid.type == UA_BASE_PAV_ALIAS || + private->uid.type == UA_HYPER_PAV_ALIAS) + return 0; + + useglobal = 0; + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */, + sizeof(*prssdp) + sizeof(*vsq), device, NULL); + if (IS_ERR(cqr)) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate initialization request"); + mutex_lock(&dasd_vol_info_mutex); + useglobal = 1; + cqr = &dasd_vol_info_req->cqr; + memset(cqr, 0, sizeof(*cqr)); + memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req)); + cqr->cpaddr = &dasd_vol_info_req->ccw; + cqr->data = &dasd_vol_info_req->data; + cqr->magic = DASD_ECKD_MAGIC; + } + + /* Prepare for Read Subsystem Data */ + prssdp = cqr->data; + prssdp->order = PSF_ORDER_PRSSD; + prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */ + prssdp->lss = private->ned->ID; + prssdp->volume = private->ned->unit_addr; + + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_PSF; + ccw->count = sizeof(*prssdp); + ccw->flags |= CCW_FLAG_CC; + ccw->cda = (__u32)(addr_t)prssdp; + + /* Read Subsystem Data - Volume Storage Query */ + vsq = (struct dasd_rssd_vsq *)(prssdp + 1); + memset(vsq, 0, sizeof(*vsq)); + + ccw++; + ccw->cmd_code = DASD_ECKD_CCW_RSSD; + ccw->count = sizeof(*vsq); + ccw->flags |= CCW_FLAG_SLI; + ccw->cda = (__u32)(addr_t)vsq; + + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + cqr->startdev = device; + cqr->memdev = device; + cqr->block = NULL; + cqr->retries = 256; + cqr->expires = device->default_expires * HZ; + /* The command might not be supported. 
Suppress the error output */ + __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); + + rc = dasd_sleep_on_interruptible(cqr); + if (rc == 0) { + memcpy(&private->vsq, vsq, sizeof(*vsq)); + } else { + dev_warn(&device->cdev->dev, + "Reading the volume storage information failed with rc=%d\n", rc); + } + + if (useglobal) + mutex_unlock(&dasd_vol_info_mutex); + else + dasd_sfree_request(cqr, cqr->memdev); + + return rc; +} + +static int dasd_eckd_is_ese(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->vsq.vol_info.ese; +} + +static int dasd_eckd_ext_pool_id(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->vsq.extent_pool_id; +} + +/* + * This value represents the total amount of available space. As more space is + * allocated by ESE volumes, this value will decrease. + * The data for this value is therefore updated on any call. + */ +static int dasd_eckd_space_configured(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + int rc; + + rc = dasd_eckd_read_vol_info(device); + + return rc ? : private->vsq.space_configured; +} + +/* + * The value of space allocated by an ESE volume may have changed and is + * therefore updated on any call. + */ +static int dasd_eckd_space_allocated(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + int rc; + + rc = dasd_eckd_read_vol_info(device); + + return rc ? : private->vsq.space_allocated; +} + +static int dasd_eckd_logical_capacity(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->vsq.logical_capacity; +} + +static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work) +{ + struct ext_pool_exhaust_work_data *data; + struct dasd_device *device; + struct dasd_device *base; + + data = container_of(work, struct ext_pool_exhaust_work_data, worker); + device = data->device; + base = data->base; + + if (!base) + base = device; + if (dasd_eckd_space_configured(base) != 0) { + dasd_generic_space_avail(device); + } else { + dev_warn(&device->cdev->dev, "No space left in the extent pool\n"); + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space"); + } + + dasd_put_device(device); + kfree(data); +} + +static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + struct ext_pool_exhaust_work_data *data; + + data = kzalloc(sizeof(*data), GFP_ATOMIC); + if (!data) + return -ENOMEM; + INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work); + dasd_get_device(device); + data->device = device; + + if (cqr->block) + data->base = cqr->block->base; + else if (cqr->basedev) + data->base = cqr->basedev; + else + data->base = NULL; + + schedule_work(&data->worker); + + return 0; +} + +static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device, + struct dasd_rssd_lcq *lcq) +{ + struct dasd_eckd_private *private = device->private; + int pool_id = dasd_eckd_ext_pool_id(device); + struct dasd_ext_pool_sum eps; + int i; + + for (i = 0; i < lcq->pool_count; i++) { + eps = lcq->ext_pool_sum[i]; + if (eps.pool_id == pool_id) { + memcpy(&private->eps, &eps, + sizeof(struct dasd_ext_pool_sum)); + } + } +} + +/* Read Extent Pool Information - Logical Configuration Query */ +static int dasd_eckd_read_ext_pool_info(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + struct dasd_psf_prssd_data *prssdp; + struct dasd_rssd_lcq *lcq; + struct dasd_ccw_req *cqr; + struct 
ccw1 *ccw; + int rc; + + /* This command cannot be executed on an alias device */ + if (private->uid.type == UA_BASE_PAV_ALIAS || + private->uid.type == UA_HYPER_PAV_ALIAS) + return 0; + + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */, + sizeof(*prssdp) + sizeof(*lcq), device, NULL); + if (IS_ERR(cqr)) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate initialization request"); + return PTR_ERR(cqr); + } + + /* Prepare for Read Subsystem Data */ + prssdp = cqr->data; + memset(prssdp, 0, sizeof(*prssdp)); + prssdp->order = PSF_ORDER_PRSSD; + prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */ + + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_PSF; + ccw->count = sizeof(*prssdp); + ccw->flags |= CCW_FLAG_CC; + ccw->cda = (__u32)(addr_t)prssdp; + + lcq = (struct dasd_rssd_lcq *)(prssdp + 1); + memset(lcq, 0, sizeof(*lcq)); + + ccw++; + ccw->cmd_code = DASD_ECKD_CCW_RSSD; + ccw->count = sizeof(*lcq); + ccw->flags |= CCW_FLAG_SLI; + ccw->cda = (__u32)(addr_t)lcq; + + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + cqr->startdev = device; + cqr->memdev = device; + cqr->block = NULL; + cqr->retries = 256; + cqr->expires = device->default_expires * HZ; + /* The command might not be supported. Suppress the error output */ + __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); + + rc = dasd_sleep_on_interruptible(cqr); + if (rc == 0) { + dasd_eckd_cpy_ext_pool_data(device, lcq); + } else { + dev_warn(&device->cdev->dev, + "Reading the logical configuration failed with rc=%d\n", rc); + } + + dasd_sfree_request(cqr, cqr->memdev); + + return rc; +} + +/* + * Depending on the device type, the extent size is specified either as + * cylinders per extent (CKD) or size per extent (FBA) + * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl. + */ +static int dasd_eckd_ext_size(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + struct dasd_ext_pool_sum eps = private->eps; + + if (!eps.flags.extent_size_valid) + return 0; + if (eps.extent_size.size_1G) + return 1113; + if (eps.extent_size.size_16M) + return 21; + + return 0; +} + +static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->eps.warn_thrshld; +} + +static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->eps.flags.capacity_at_warnlevel; +} + +/* + * Extent Pool out of space + */ +static int dasd_eckd_ext_pool_oos(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->eps.flags.pool_oos; +} /* * Build CP for Perform Subsystem Function - SSC. 
@@ -1721,6 +2019,16 @@ dasd_eckd_check_characteristics(struct dasd_device *device) /* Read Feature Codes */ dasd_eckd_read_features(device); + /* Read Volume Information */ + rc = dasd_eckd_read_vol_info(device); + if (rc) + goto out_err3; + + /* Read Extent Pool Information */ + rc = dasd_eckd_read_ext_pool_info(device); + if (rc) + goto out_err3; + /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, &private->rdc_data, 64); @@ -1751,6 +2059,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (readonly) set_bit(DASD_FLAG_DEVICE_RO, &device->flags); + if (dasd_eckd_is_ese(device)) + dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1); + dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " "with %d cylinders, %d heads, %d sectors%s\n", private->rdc_data.dev_type, @@ -1823,8 +2134,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; - /* Define extent for the first 3 tracks. */ - define_extent(ccw++, cqr->data, 0, 2, + /* Define extent for the first 2 tracks. */ + define_extent(ccw++, cqr->data, 0, 1, DASD_ECKD_CCW_READ_COUNT, device, 0); LO_data = cqr->data + sizeof(struct DE_eckd_data); /* Locate record for the first 4 records on track 0. */ @@ -1843,9 +2154,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) count_data++; } - /* Locate record for the first record on track 2. */ + /* Locate record for the first record on track 1. */ ccw[-1].flags |= CCW_FLAG_CC; - locate_record(ccw++, LO_data++, 2, 0, 1, + locate_record(ccw++, LO_data++, 1, 0, 1, DASD_ECKD_CCW_READ_COUNT, device, 0); /* Read count ccw. */ ccw[-1].flags |= CCW_FLAG_CC; @@ -1860,6 +2171,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cqr->retries = 255; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; + /* Set flags to suppress output for expected errors */ + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + return cqr; } @@ -1967,7 +2281,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) } } if (i == 3) - count_area = &private->count_area[4]; + count_area = &private->count_area[3]; if (private->uses_cdl == 0) { for (i = 0; i < 5; i++) { @@ -2099,8 +2413,7 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, */ itcw_size = itcw_calc_size(0, count, 0); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, - NULL); + cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); if (IS_ERR(cqr)) return cqr; @@ -2193,8 +2506,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, } cplength += count; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev, NULL); + cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); if (IS_ERR(cqr)) return cqr; @@ -2241,13 +2553,11 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, } static struct dasd_ccw_req * -dasd_eckd_build_format(struct dasd_device *base, - struct format_data_t *fdata, - int enable_pav) +dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, + struct format_data_t *fdata, int enable_pav) { struct dasd_eckd_private *base_priv; struct dasd_eckd_private *start_priv; - struct dasd_device *startdev = NULL; struct dasd_ccw_req *fcp; struct eckd_count *ect; struct ch_t address; @@ -2338,9 +2648,8 @@ dasd_eckd_build_format(struct dasd_device *base, fdata->intensity); return ERR_PTR(-EINVAL); } - /* Allocate the format ccw request. 
*/ - fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, - datasize, startdev, NULL); + + fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); if (IS_ERR(fcp)) return fcp; @@ -2513,7 +2822,7 @@ dasd_eckd_format_build_ccw_req(struct dasd_device *base, struct dasd_ccw_req *ccw_req; if (!fmt_buffer) { - ccw_req = dasd_eckd_build_format(base, fdata, enable_pav); + ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); } else { if (tpm) ccw_req = dasd_eckd_build_check_tcw(base, fdata, @@ -2659,7 +2968,7 @@ out_err: rc = -EIO; } list_del_init(&cqr->blocklist); - dasd_sfree_request(cqr, device); + dasd_ffree_request(cqr, device); private->count--; } @@ -2699,6 +3008,96 @@ static int dasd_eckd_format_device(struct dasd_device *base, } /* + * Callback function to free ESE format requests. + */ +static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) +{ + struct dasd_device *device = cqr->startdev; + struct dasd_eckd_private *private = device->private; + + private->count--; + dasd_ffree_request(cqr, device); +} + +static struct dasd_ccw_req * +dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) +{ + struct dasd_eckd_private *private; + struct format_data_t fdata; + unsigned int recs_per_trk; + struct dasd_ccw_req *fcqr; + struct dasd_device *base; + struct dasd_block *block; + unsigned int blksize; + struct request *req; + sector_t first_trk; + sector_t last_trk; + int rc; + + req = cqr->callback_data; + base = cqr->block->base; + private = base->private; + block = base->block; + blksize = block->bp_block; + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + + first_trk = blk_rq_pos(req) >> block->s2b_shift; + sector_div(first_trk, recs_per_trk); + last_trk = + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; + sector_div(last_trk, recs_per_trk); + + fdata.start_unit = first_trk; + fdata.stop_unit = last_trk; + fdata.blksize = blksize; + fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; + + rc = dasd_eckd_format_sanity_checks(base, &fdata); + if (rc) + return ERR_PTR(-EINVAL); + + /* + * We're building the request with PAV disabled as we're reusing + * the former startdev. + */ + fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0); + if (IS_ERR(fcqr)) + return fcqr; + + fcqr->callback = dasd_eckd_ese_format_cb; + + return fcqr; +} + +/* + * When data is read from an unformatted area of an ESE volume, this function + * returns zeroed data and thereby mimics a read of zero data. + */ +static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr) +{ + unsigned int blksize, off; + struct dasd_device *base; + struct req_iterator iter; + struct request *req; + struct bio_vec bv; + char *dst; + + req = (struct request *) cqr->callback_data; + base = cqr->block->base; + blksize = base->block->bp_block; + + rq_for_each_segment(bv, req, iter) { + dst = page_address(bv.bv_page) + bv.bv_offset; + for (off = 0; off < bv.bv_len; off += blksize) { + if (dst && rq_data_dir(req) == READ) { + dst += off; + memset(dst, 0, blksize); + } + } + } +} + +/* * Helper function to count consecutive records of a single track. 
*/ static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, @@ -3033,6 +3432,277 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, } } +static int dasd_eckd_ras_sanity_checks(struct dasd_device *device, + unsigned int first_trk, + unsigned int last_trk) +{ + struct dasd_eckd_private *private = device->private; + unsigned int trks_per_vol; + int rc = 0; + + trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl; + + if (first_trk >= trks_per_vol) { + dev_warn(&device->cdev->dev, + "Start track number %u used in the space release command is too big\n", + first_trk); + rc = -EINVAL; + } else if (last_trk >= trks_per_vol) { + dev_warn(&device->cdev->dev, + "Stop track number %u used in the space release command is too big\n", + last_trk); + rc = -EINVAL; + } else if (first_trk > last_trk) { + dev_warn(&device->cdev->dev, + "Start track %u used in the space release command exceeds the end track\n", + first_trk); + rc = -EINVAL; + } + return rc; +} + +/* + * Helper function to count the amount of involved extents within a given range + * with extent alignment in mind. + */ +static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) +{ + int cur_pos = 0; + int count = 0; + int tmp; + + if (from == to) + return 1; + + /* Count first partial extent */ + if (from % trks_per_ext != 0) { + tmp = from + trks_per_ext - (from % trks_per_ext) - 1; + if (tmp > to) + tmp = to; + cur_pos = tmp - from + 1; + count++; + } + /* Count full extents */ + if (to - (from + cur_pos) + 1 >= trks_per_ext) { + tmp = to - ((to - trks_per_ext + 1) % trks_per_ext); + count += (tmp - (from + cur_pos) + 1) / trks_per_ext; + cur_pos = tmp; + } + /* Count last partial extent */ + if (cur_pos < to) + count++; + + return count; +} + +/* + * Release allocated space for a given range or an entire volume. + */ +static struct dasd_ccw_req * +dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, + struct request *req, unsigned int first_trk, + unsigned int last_trk, int by_extent) +{ + struct dasd_eckd_private *private = device->private; + struct dasd_dso_ras_ext_range *ras_range; + struct dasd_rssd_features *features; + struct dasd_dso_ras_data *ras_data; + u16 heads, beg_head, end_head; + int cur_to_trk, cur_from_trk; + struct dasd_ccw_req *cqr; + u32 beg_cyl, end_cyl; + struct ccw1 *ccw; + int trks_per_ext; + size_t ras_size; + size_t size; + int nr_exts; + void *rq; + int i; + + if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) + return ERR_PTR(-EINVAL); + + rq = req ? blk_mq_rq_to_pdu(req) : NULL; + + features = &private->features; + + trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; + nr_exts = 0; + if (by_extent) + nr_exts = count_exts(first_trk, last_trk, trks_per_ext); + ras_size = sizeof(*ras_data); + size = ras_size + (nr_exts * sizeof(*ras_range)); + + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); + if (IS_ERR(cqr)) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate RAS request"); + return cqr; + } + + ras_data = cqr->data; + memset(ras_data, 0, size); + + ras_data->order = DSO_ORDER_RAS; + ras_data->flags.vol_type = 0; /* CKD volume */ + /* Release specified extents or entire volume */ + ras_data->op_flags.by_extent = by_extent; + /* + * This bit guarantees initialisation of tracks within an extent that is + * not fully specified, but is only supported with a certain feature + * subset. 
+ */ + ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); + ras_data->lss = private->ned->ID; + ras_data->dev_addr = private->ned->unit_addr; + ras_data->nr_exts = nr_exts; + + if (by_extent) { + heads = private->rdc_data.trk_per_cyl; + cur_from_trk = first_trk; + cur_to_trk = first_trk + trks_per_ext - + (first_trk % trks_per_ext) - 1; + if (cur_to_trk > last_trk) + cur_to_trk = last_trk; + ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); + + for (i = 0; i < nr_exts; i++) { + beg_cyl = cur_from_trk / heads; + beg_head = cur_from_trk % heads; + end_cyl = cur_to_trk / heads; + end_head = cur_to_trk % heads; + + set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); + set_ch_t(&ras_range->end_ext, end_cyl, end_head); + + cur_from_trk = cur_to_trk + 1; + cur_to_trk = cur_from_trk + trks_per_ext - 1; + if (cur_to_trk > last_trk) + cur_to_trk = last_trk; + ras_range++; + } + } + + ccw = cqr->cpaddr; + ccw->cda = (__u32)(addr_t)cqr->data; + ccw->cmd_code = DASD_ECKD_CCW_DSO; + ccw->count = size; + + cqr->startdev = device; + cqr->memdev = device; + cqr->block = block; + cqr->retries = 256; + cqr->expires = device->default_expires * HZ; + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + + return cqr; +} + +static int dasd_eckd_release_space_full(struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + int rc; + + cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); + if (IS_ERR(cqr)) + return PTR_ERR(cqr); + + rc = dasd_sleep_on_interruptible(cqr); + + dasd_sfree_request(cqr, cqr->memdev); + + return rc; +} + +static int dasd_eckd_release_space_trks(struct dasd_device *device, + unsigned int from, unsigned int to) +{ + struct dasd_eckd_private *private = device->private; + struct dasd_block *block = device->block; + struct dasd_ccw_req *cqr, *n; + struct list_head ras_queue; + unsigned int device_exts; + int trks_per_ext; + int stop, step; + int cur_pos; + int rc = 0; + int retry; + + INIT_LIST_HEAD(&ras_queue); + + device_exts = private->real_cyl / dasd_eckd_ext_size(device); + trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; + + /* Make sure device limits are not exceeded */ + step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); + cur_pos = from; + + do { + retry = 0; + while (cur_pos < to) { + stop = cur_pos + step - + ((cur_pos + step) % trks_per_ext) - 1; + if (stop > to) + stop = to; + + cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); + if (IS_ERR(cqr)) { + rc = PTR_ERR(cqr); + if (rc == -ENOMEM) { + if (list_empty(&ras_queue)) + goto out; + retry = 1; + break; + } + goto err_out; + } + + spin_lock_irq(&block->queue_lock); + list_add_tail(&cqr->blocklist, &ras_queue); + spin_unlock_irq(&block->queue_lock); + cur_pos = stop + 1; + } + + rc = dasd_sleep_on_queue_interruptible(&ras_queue); + +err_out: + list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { + device = cqr->startdev; + private = device->private; + + spin_lock_irq(&block->queue_lock); + list_del_init(&cqr->blocklist); + spin_unlock_irq(&block->queue_lock); + dasd_sfree_request(cqr, device); + private->count--; + } + } while (retry); + +out: + return rc; +} + +static int dasd_eckd_release_space(struct dasd_device *device, + struct format_data_t *rdata) +{ + if (rdata->intensity & DASD_FMT_INT_ESE_FULL) + return dasd_eckd_release_space_full(device); + else if (rdata->intensity == 0) + return dasd_eckd_release_space_trks(device, rdata->start_unit, + rdata->stop_unit); + else + return -EINVAL; +} + +static struct 
dasd_ccw_req * +dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block, + struct request *req, sector_t first_trk, + sector_t last_trk) +{ + return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1); +} + static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( struct dasd_device *startdev, struct dasd_block *block, @@ -3214,6 +3884,14 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; + + /* Set flags to suppress output for expected errors */ + if (dasd_eckd_is_ese(basedev)) { + set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + } + return cqr; } @@ -3385,6 +4063,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; + + /* Set flags to suppress output for expected errors */ + if (dasd_eckd_is_ese(basedev)) + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + return cqr; } @@ -3704,6 +4387,14 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( cqr->retries = startdev->default_retries; cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; + + /* Set flags to suppress output for expected errors */ + if (dasd_eckd_is_ese(basedev)) { + set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); + } + return cqr; out_error: dasd_sfree_request(cqr, startdev); @@ -3756,6 +4447,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, cmdwtd = private->features.feature[12] & 0x40; use_prefix = private->features.feature[8] & 0x01; + if (req_op(req) == REQ_OP_DISCARD) + return dasd_eckd_build_cp_discard(startdev, block, req, + first_trk, last_trk); + cqr = NULL; if (cdlspecial || dasd_page_cache) { /* do nothing, just fall through to the cmd mode single case */ @@ -4034,12 +4729,14 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, struct dasd_block *block, struct request *req) { + struct dasd_device *startdev = NULL; struct dasd_eckd_private *private; - struct dasd_device *startdev; - unsigned long flags; struct dasd_ccw_req *cqr; + unsigned long flags; - startdev = dasd_alias_get_start_dev(base); + /* Discard requests can only be processed on base devices */ + if (req_op(req) != REQ_OP_DISCARD) + startdev = dasd_alias_get_start_dev(base); if (!startdev) startdev = base; private = startdev->private; @@ -4965,6 +5662,16 @@ static int dasd_eckd_restore_device(struct dasd_device *device) /* Read Feature Codes */ dasd_eckd_read_features(device); + /* Read Volume Information */ + rc = dasd_eckd_read_vol_info(device); + if (rc) + goto out_err2; + + /* Read Extent Pool Information */ + rc = dasd_eckd_read_ext_pool_info(device); + if (rc) + goto out_err2; + /* Read Device Characteristics */ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, &temp_rdc_data, 64); @@ -5635,6 +6342,73 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages, device->discipline->check_attention(device, lpum); } +static void dasd_eckd_oos_resume(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + struct alias_pav_group *pavgroup, *tempgroup; + struct dasd_device *dev, *n; + unsigned long flags; + + spin_lock_irqsave(&private->lcu->lock, flags); + 
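Taken together with the REQ_OP_DISCARD branch added to dasd_eckd_build_cp() above, discards issued through the regular block layer end up in dasd_eckd_build_cp_discard() and therefore in a Release Allocated Space request. Once dasd_eckd_setup_blk_queue() (further down in this patch) advertises discard support, an ESE volume can be thinned from user space with nothing more than BLKDISCARD; a minimal sketch, with a placeholder device node and an arbitrary 64 MiB range that should ideally be aligned to the advertised discard granularity:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	const char *dev = "/dev/dasdb";			/* placeholder */
	uint64_t range[2] = { 0, 64ULL << 20 };		/* offset, length in bytes */
	int fd;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the kernel to release the backing space for this byte range */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}

blkdiscard(8) issues the same ioctl without any code.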
list_for_each_entry_safe(dev, n, &private->lcu->active_devices, + alias_list) { + if (dev->stopped & DASD_STOPPED_NOSPC) + dasd_generic_space_avail(dev); + } + list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, + alias_list) { + if (dev->stopped & DASD_STOPPED_NOSPC) + dasd_generic_space_avail(dev); + } + /* devices in PAV groups */ + list_for_each_entry_safe(pavgroup, tempgroup, + &private->lcu->grouplist, + group) { + list_for_each_entry_safe(dev, n, &pavgroup->baselist, + alias_list) { + if (dev->stopped & DASD_STOPPED_NOSPC) + dasd_generic_space_avail(dev); + } + list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, + alias_list) { + if (dev->stopped & DASD_STOPPED_NOSPC) + dasd_generic_space_avail(dev); + } + } + spin_unlock_irqrestore(&private->lcu->lock, flags); +} + +static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages, + __u8 lpum) +{ + struct dasd_oos_message *oos = messages; + + switch (oos->code) { + case REPO_WARN: + case POOL_WARN: + dev_warn(&device->cdev->dev, + "Extent pool usage has reached a critical value\n"); + dasd_eckd_oos_resume(device); + break; + case REPO_EXHAUST: + case POOL_EXHAUST: + dev_warn(&device->cdev->dev, + "Extent pool is exhausted\n"); + break; + case REPO_RELIEVE: + case POOL_RELIEVE: + dev_info(&device->cdev->dev, + "Extent pool physical space constraint has been relieved\n"); + break; + } + + /* In any case, update related data */ + dasd_eckd_read_ext_pool_info(device); + + /* to make sure there is no attention left schedule work again */ + device->discipline->check_attention(device, lpum); +} + static void dasd_eckd_check_attention_work(struct work_struct *work) { struct check_attention_work_data *data; @@ -5653,9 +6427,14 @@ static void dasd_eckd_check_attention_work(struct work_struct *work) rc = dasd_eckd_read_message_buffer(device, messages, data->lpum); if (rc) goto out; + if (messages->length == ATTENTION_LENGTH_CUIR && messages->format == ATTENTION_FORMAT_CUIR) dasd_eckd_handle_cuir(device, messages, data->lpum); + if (messages->length == ATTENTION_LENGTH_OOS && + messages->format == ATTENTION_FORMAT_OOS) + dasd_eckd_handle_oos(device, messages, data->lpum); + out: dasd_put_device(device); kfree(messages); @@ -5734,6 +6513,72 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device, dasd_schedule_requeue(device); } +/* + * Initialize block layer request queue. 
+ */ +static void dasd_eckd_setup_blk_queue(struct dasd_block *block) +{ + unsigned int logical_block_size = block->bp_block; + struct request_queue *q = block->request_queue; + struct dasd_device *device = block->base; + struct dasd_eckd_private *private; + unsigned int max_discard_sectors; + unsigned int max_bytes; + unsigned int ext_bytes; /* Extent Size in Bytes */ + int recs_per_trk; + int trks_per_cyl; + int ext_limit; + int ext_size; /* Extent Size in Cylinders */ + int max; + + private = device->private; + trks_per_cyl = private->rdc_data.trk_per_cyl; + recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size); + + if (device->features & DASD_FEATURE_USERAW) { + /* + * the max_blocks value for raw_track access is 256 + * it is higher than the native ECKD value because we + * only need one ccw per track + * so the max_hw_sectors are + * 2048 x 512B = 1024kB = 16 tracks + */ + max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; + } else { + max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift; + } + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + q->limits.max_dev_sectors = max; + blk_queue_logical_block_size(q, logical_block_size); + blk_queue_max_hw_sectors(q, max); + blk_queue_max_segments(q, USHRT_MAX); + /* With page sized segments each segment can be translated into one idaw/tidaw */ + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); + + if (dasd_eckd_is_ese(device)) { + /* + * Depending on the extent size, up to UINT_MAX bytes can be + * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the + * device limits should be exceeded. + */ + ext_size = dasd_eckd_ext_size(device); + ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX); + ext_bytes = ext_size * trks_per_cyl * recs_per_trk * + logical_block_size; + max_bytes = UINT_MAX - (UINT_MAX % ext_bytes); + if (max_bytes / ext_bytes > ext_limit) + max_bytes = ext_bytes * ext_limit; + + max_discard_sectors = max_bytes / 512; + + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + q->limits.discard_granularity = ext_bytes; + q->limits.discard_alignment = ext_bytes; + } +} + static struct ccw_driver dasd_eckd_driver = { .driver = { .name = "dasd-eckd", @@ -5754,24 +6599,10 @@ static struct ccw_driver dasd_eckd_driver = { .int_class = IRQIO_DAS, }; -/* - * max_blocks is dependent on the amount of storage that is available - * in the static io buffer for each device. Currently each device has - * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has - * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use - * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In - * addition we have one define extent ccw + 16 bytes of data and one - * locate record ccw + 16 bytes of data. That makes: - * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. - * We want to fit two into the available memory so that we can immediately - * start the next request if one finishes off. That makes 249.5 blocks - * for one request. Give a little safety and the result is 240. 
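The ESE branch of dasd_eckd_setup_blk_queue() above sizes discards in whole extents: ext_bytes is one extent expressed in bytes, discard granularity and alignment are set to exactly that, and the per-request maximum is the largest multiple of ext_bytes that fits into UINT_MAX and does not exceed the smaller of DASD_ECKD_RAS_EXTS_MAX and the number of extents on the volume. A stand-alone sketch of that arithmetic, using assumed 3390-style geometry (15 tracks per cylinder, 12 records per track at a 4 KiB block size, 21-cylinder extents, a volume with more than 110 extents):

#include <limits.h>
#include <stdio.h>

#define DASD_ECKD_RAS_EXTS_MAX	110U	/* per-request extent limit from dasd_eckd.h */

int main(void)
{
	/* assumed geometry, the driver reads these from the device */
	unsigned int logical_block_size = 4096;
	unsigned int recs_per_trk = 12;
	unsigned int trks_per_cyl = 15;
	unsigned int ext_size = 21;			/* cylinders per extent */
	unsigned int ext_limit = DASD_ECKD_RAS_EXTS_MAX;
	unsigned int ext_bytes, max_bytes, max_discard_sectors;

	ext_bytes = ext_size * trks_per_cyl * recs_per_trk * logical_block_size;

	/* largest multiple of ext_bytes that still fits an unsigned int ... */
	max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
	/* ... capped to at most ext_limit extents per request */
	if (max_bytes / ext_bytes > ext_limit)
		max_bytes = ext_bytes * ext_limit;

	max_discard_sectors = max_bytes / 512;

	printf("bytes per extent       : %u\n", ext_bytes);
	printf("max bytes per discard  : %u\n", max_bytes);
	printf("max discard sectors    : %u\n", max_discard_sectors);
	return 0;
}

With these assumptions one extent is 15482880 bytes and a single discard request is limited to 110 extents, roughly 1.6 GiB.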
- */ static struct dasd_discipline dasd_eckd_discipline = { .owner = THIS_MODULE, .name = "ECKD", .ebcname = "ECKD", - .max_blocks = 190, .check_device = dasd_eckd_check_characteristics, .uncheck_device = dasd_eckd_uncheck_device, .do_analysis = dasd_eckd_do_analysis, @@ -5779,6 +6610,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, .basic_to_known = dasd_eckd_basic_to_known, + .setup_blk_queue = dasd_eckd_setup_blk_queue, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, @@ -5806,6 +6638,19 @@ static struct dasd_discipline dasd_eckd_discipline = { .disable_hpf = dasd_eckd_disable_hpf_device, .hpf_enabled = dasd_eckd_hpf_enabled, .reset_path = dasd_eckd_reset_path, + .is_ese = dasd_eckd_is_ese, + .space_allocated = dasd_eckd_space_allocated, + .space_configured = dasd_eckd_space_configured, + .logical_capacity = dasd_eckd_logical_capacity, + .release_space = dasd_eckd_release_space, + .ext_pool_id = dasd_eckd_ext_pool_id, + .ext_size = dasd_eckd_ext_size, + .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel, + .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld, + .ext_pool_oos = dasd_eckd_ext_pool_oos, + .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust, + .ese_format = dasd_eckd_ese_format, + .ese_read = dasd_eckd_ese_read, }; static int __init @@ -5818,16 +6663,22 @@ dasd_eckd_init(void) GFP_KERNEL | GFP_DMA); if (!dasd_reserve_req) return -ENOMEM; + dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req), + GFP_KERNEL | GFP_DMA); + if (!dasd_vol_info_req) + return -ENOMEM; path_verification_worker = kmalloc(sizeof(*path_verification_worker), GFP_KERNEL | GFP_DMA); if (!path_verification_worker) { kfree(dasd_reserve_req); + kfree(dasd_vol_info_req); return -ENOMEM; } rawpadpage = (void *)__get_free_page(GFP_KERNEL); if (!rawpadpage) { kfree(path_verification_worker); kfree(dasd_reserve_req); + kfree(dasd_vol_info_req); return -ENOMEM; } ret = ccw_driver_register(&dasd_eckd_driver); @@ -5836,6 +6687,7 @@ dasd_eckd_init(void) else { kfree(path_verification_worker); kfree(dasd_reserve_req); + kfree(dasd_vol_info_req); free_page((unsigned long)rawpadpage); } return ret; diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index 5869d2fede35..6943508d0f1d 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -50,16 +50,26 @@ #define DASD_ECKD_CCW_PFX_READ 0xEA #define DASD_ECKD_CCW_RSCK 0xF9 #define DASD_ECKD_CCW_RCD 0xFA +#define DASD_ECKD_CCW_DSO 0xF7 + +/* Define Subssystem Function / Orders */ +#define DSO_ORDER_RAS 0x81 /* - * Perform Subsystem Function / Sub-Orders + * Perform Subsystem Function / Orders */ #define PSF_ORDER_PRSSD 0x18 #define PSF_ORDER_CUIR_RESPONSE 0x1A -#define PSF_SUBORDER_QHA 0x1C #define PSF_ORDER_SSC 0x1D /* + * Perform Subsystem Function / Sub-Orders + */ +#define PSF_SUBORDER_QHA 0x1C /* Query Host Access */ +#define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */ +#define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */ + +/* * CUIR response condition codes */ #define PSF_CUIR_INVALID 0x00 @@ -80,10 +90,22 @@ #define CUIR_RESUME 0x02 /* + * Out-of-space (OOS) Codes + */ +#define REPO_WARN 0x01 +#define REPO_EXHAUST 0x02 +#define POOL_WARN 0x03 +#define POOL_EXHAUST 0x04 +#define REPO_RELIEVE 0x05 +#define POOL_RELIEVE 0x06 + +/* * attention message definitions */ #define ATTENTION_LENGTH_CUIR 0x0e #define ATTENTION_FORMAT_CUIR 0x01 +#define 
ATTENTION_LENGTH_OOS 0x10 +#define ATTENTION_FORMAT_OOS 0x06 #define DASD_ECKD_PG_GROUPED 0x10 @@ -99,6 +121,12 @@ #define DASD_ECKD_PATH_THRHLD 256 #define DASD_ECKD_PATH_INTERVAL 300 +/* + * Maximum number of blocks to be chained + */ +#define DASD_ECKD_MAX_BLOCKS 190 +#define DASD_ECKD_MAX_BLOCKS_RAW 256 + /***************************************************************************** * SECTION: Type Definitions ****************************************************************************/ @@ -116,35 +144,12 @@ struct ch_t { __u16 head; } __attribute__ ((packed)); -struct chs_t { - __u16 cyl; - __u16 head; - __u32 sector; -} __attribute__ ((packed)); - struct chr_t { __u16 cyl; __u16 head; __u8 record; } __attribute__ ((packed)); -struct geom_t { - __u16 cyl; - __u16 head; - __u32 sector; -} __attribute__ ((packed)); - -struct eckd_home { - __u8 skip_control[14]; - __u16 cell_number; - __u8 physical_addr[3]; - __u8 flag; - struct ch_t track_addr; - __u8 reserved; - __u8 key_length; - __u8 reserved2[2]; -} __attribute__ ((packed)); - struct DE_eckd_data { struct { unsigned char perm:2; /* Permissions on this extent */ @@ -387,6 +392,86 @@ struct dasd_rssd_messages { char messages[4087]; } __packed; +/* + * Read Subsystem Data - Volume Storage Query + */ +struct dasd_rssd_vsq { + struct { + __u8 tse:1; + __u8 space_not_available:1; + __u8 ese:1; + __u8 unused:5; + } __packed vol_info; + __u8 unused1; + __u16 extent_pool_id; + __u8 warn_cap_limit; + __u8 warn_cap_guaranteed; + __u16 unused2; + __u32 limit_capacity; + __u32 guaranteed_capacity; + __u32 space_allocated; + __u32 space_configured; + __u32 logical_capacity; +} __packed; + +/* + * Extent Pool Summary + */ +struct dasd_ext_pool_sum { + __u16 pool_id; + __u8 repo_warn_thrshld; + __u8 warn_thrshld; + struct { + __u8 type:1; /* 0 - CKD / 1 - FB */ + __u8 track_space_efficient:1; + __u8 extent_space_efficient:1; + __u8 standard_volume:1; + __u8 extent_size_valid:1; + __u8 capacity_at_warnlevel:1; + __u8 pool_oos:1; + __u8 unused0:1; + __u8 unused1; + } __packed flags; + struct { + __u8 reserved0:1; + __u8 size_1G:1; + __u8 reserved1:5; + __u8 size_16M:1; + } __packed extent_size; + __u8 unused; +} __packed; + +/* + * Read Subsystem Data-Response - Logical Configuration Query - Header + */ +struct dasd_rssd_lcq { + __u16 data_length; /* Length of data returned */ + __u16 pool_count; /* Count of extent pools returned - Max: 448 */ + struct { + __u8 pool_info_valid:1; /* Detailed Information valid */ + __u8 pool_id_volume:1; + __u8 pool_id_cec:1; + __u8 unused0:5; + __u8 unused1; + } __packed header_flags; + char sfi_type[6]; /* Storage Facility Image Type (EBCDIC) */ + char sfi_model[3]; /* Storage Facility Image Model (EBCDIC) */ + __u8 sfi_seq_num[10]; /* Storage Facility Image Sequence Number */ + __u8 reserved[7]; + struct dasd_ext_pool_sum ext_pool_sum[448]; +} __packed; + +struct dasd_oos_message { + __u16 length; + __u8 format; + __u8 code; + __u8 percentage_empty; + __u8 reserved; + __u16 ext_pool_id; + __u16 token; + __u8 unused[6]; +} __packed; + struct dasd_cuir_message { __u16 length; __u8 format; @@ -461,6 +546,42 @@ struct dasd_psf_ssc_data { unsigned char reserved[59]; } __attribute__((packed)); +/* Maximum number of extents for a single Release Allocated Space command */ +#define DASD_ECKD_RAS_EXTS_MAX 110U + +struct dasd_dso_ras_ext_range { + struct ch_t beg_ext; + struct ch_t end_ext; +} __packed; + +/* + * Define Subsytem Operation - Release Allocated Space + */ +struct dasd_dso_ras_data { + __u8 order; + struct { + 
__u8 message:1; /* Must be zero */ + __u8 reserved1:2; + __u8 vol_type:1; /* 0 - CKD/FBA, 1 - FB */ + __u8 reserved2:4; + } __packed flags; + /* Operation Flags to specify scope */ + struct { + __u8 reserved1:2; + /* Release Space by Extent */ + __u8 by_extent:1; /* 0 - entire volume, 1 - specified extents */ + __u8 guarantee_init:1; + __u8 force_release:1; /* Internal - will be ignored */ + __u16 reserved2:11; + } __packed op_flags; + __u8 lss; + __u8 dev_addr; + __u32 reserved1; + __u8 reserved2[10]; + __u16 nr_exts; /* Defines number of ext_scope - max 110 */ + __u16 reserved3; +} __packed; + /* * some structures and definitions for alias handling @@ -551,6 +672,8 @@ struct dasd_eckd_private { int uses_cdl; struct attrib_data_t attrib; /* e.g. cache operations */ struct dasd_rssd_features features; + struct dasd_rssd_vsq vsq; + struct dasd_ext_pool_sum eps; u32 real_cyl; /* alias managemnet */ @@ -572,7 +695,5 @@ int dasd_alias_remove_device(struct dasd_device *); struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); void dasd_alias_handle_summary_unit_check(struct work_struct *); void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); -void dasd_alias_lcu_setup_complete(struct dasd_device *); -void dasd_alias_wait_for_lcu_setup(struct dasd_device *); int dasd_alias_update_add_device(struct dasd_device *); #endif /* DASD_ECKD_H */ diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 93bb09da7fdc..5ae64af9ccea 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -386,6 +386,7 @@ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, dasd_eer_write_standard_trigger(device, cqr, id); break; case DASD_EER_NOPATH: + case DASD_EER_NOSPC: dasd_eer_write_standard_trigger(device, NULL, id); break; case DASD_EER_STATECHANGE: diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 56007a3e7f11..cbb770824226 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -770,27 +770,46 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, } /* - * max_blocks is dependent on the amount of storage that is available - * in the static io buffer for each device. Currently each device has - * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has - * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use - * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In - * addition we have one define extent ccw + 16 bytes of data and a - * locate record ccw for each block (stupid devices!) + 16 bytes of data. - * That makes: - * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum. - * We want to fit two into the available memory so that we can immediately - * start the next request if one finishes off. That makes 100.1 blocks - * for one request. Give a little safety and the result is 96. + * Initialize block layer request queue. 
*/ +static void dasd_fba_setup_blk_queue(struct dasd_block *block) +{ + unsigned int logical_block_size = block->bp_block; + struct request_queue *q = block->request_queue; + unsigned int max_bytes, max_discard_sectors; + int max; + + max = DASD_FBA_MAX_BLOCKS << block->s2b_shift; + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + q->limits.max_dev_sectors = max; + blk_queue_logical_block_size(q, logical_block_size); + blk_queue_max_hw_sectors(q, max); + blk_queue_max_segments(q, USHRT_MAX); + /* With page sized segments each segment can be translated into one idaw/tidaw */ + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_segment_boundary(q, PAGE_SIZE - 1); + + q->limits.discard_granularity = logical_block_size; + q->limits.discard_alignment = PAGE_SIZE; + + /* Calculate max_discard_sectors and make it PAGE aligned */ + max_bytes = USHRT_MAX * logical_block_size; + max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); + max_discard_sectors = max_bytes / logical_block_size; + + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); +} + static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", - .max_blocks = 96, .check_device = dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, .verify_path = dasd_generic_verify_path, + .setup_blk_queue = dasd_fba_setup_blk_queue, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h index b14bf1b2c691..8f75df06e893 100644 --- a/drivers/s390/block/dasd_fba.h +++ b/drivers/s390/block/dasd_fba.h @@ -9,6 +9,11 @@ #ifndef DASD_FBA_H #define DASD_FBA_H +/* + * Maximum number of blocks to be chained + */ +#define DASD_FBA_MAX_BLOCKS 96 + struct DE_fba_data { struct { unsigned char perm:2; /* Permissions on this extent */ diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index de6b96036aa4..91c9f9586e0f 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -268,7 +268,6 @@ struct dasd_discipline { struct module *owner; char ebcname[8]; /* a name used for tagging and printks */ char name[8]; /* a name used for tagging and printks */ - int max_blocks; /* maximum number of blocks to be chained */ struct list_head list; /* used for list of disciplines */ @@ -307,6 +306,10 @@ struct dasd_discipline { int (*online_to_ready) (struct dasd_device *); int (*basic_to_known)(struct dasd_device *); + /* + * Initialize block layer request queue. + */ + void (*setup_blk_queue)(struct dasd_block *); /* (struct dasd_device *); * Device operation functions. 
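FBA devices have no extents to respect, so dasd_fba_setup_blk_queue() above simply caps a discard or write-zeroes request at USHRT_MAX logical blocks and rounds the result down to a page boundary. The same computation, stand-alone, for an assumed 512-byte block size and 4 KiB pages:

#include <limits.h>
#include <stdio.h>

/* simplified stand-in for the kernel's ALIGN_DOWN(), good enough here */
#define ALIGN_DOWN(x, a)	((x) - ((x) % (a)))

int main(void)
{
	unsigned int logical_block_size = 512;	/* assumed */
	unsigned int page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned int max_bytes, max_discard_sectors;

	max_bytes = USHRT_MAX * logical_block_size;	/* 65535 blocks */
	max_bytes = ALIGN_DOWN(max_bytes, page_size);	/* make it page aligned */
	max_discard_sectors = max_bytes / logical_block_size;

	printf("max bytes per discard  : %u\n", max_bytes);
	printf("max discard sectors    : %u\n", max_discard_sectors);
	return 0;
}

For 512-byte blocks this comes out at 65528 sectors, just under 32 MiB per request.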
build_cp creates a ccw chain for * a block device request, start_io starts the request and @@ -367,6 +370,25 @@ struct dasd_discipline { void (*disable_hpf)(struct dasd_device *); int (*hpf_enabled)(struct dasd_device *); void (*reset_path)(struct dasd_device *, __u8); + + /* + * Extent Space Efficient (ESE) relevant functions + */ + int (*is_ese)(struct dasd_device *); + /* Capacity */ + int (*space_allocated)(struct dasd_device *); + int (*space_configured)(struct dasd_device *); + int (*logical_capacity)(struct dasd_device *); + int (*release_space)(struct dasd_device *, struct format_data_t *); + /* Extent Pool */ + int (*ext_pool_id)(struct dasd_device *); + int (*ext_size)(struct dasd_device *); + int (*ext_pool_cap_at_warnlevel)(struct dasd_device *); + int (*ext_pool_warn_thrshld)(struct dasd_device *); + int (*ext_pool_oos)(struct dasd_device *); + int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); + struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *); + void (*ese_read)(struct dasd_ccw_req *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -386,6 +408,7 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; #define DASD_EER_NOPATH 2 #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 +#define DASD_EER_NOSPC 5 /* DASD path handling */ @@ -482,8 +505,10 @@ struct dasd_device { spinlock_t mem_lock; void *ccw_mem; void *erp_mem; + void *ese_mem; struct list_head ccw_chunks; struct list_head erp_chunks; + struct list_head ese_chunks; atomic_t tasklet_scheduled; struct tasklet_struct tasklet; @@ -558,6 +583,7 @@ struct dasd_queue { #define DASD_STOPPED_SU 16 /* summary unit check handling */ #define DASD_STOPPED_PM 32 /* pm state transition */ #define DASD_UNRESUMED_PM 64 /* pm resume failed state */ +#define DASD_STOPPED_NOSPC 128 /* no space left */ /* per device flags */ #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ @@ -700,7 +726,9 @@ extern struct kmem_cache *dasd_page_cache; struct dasd_ccw_req * dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *); +struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *); void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); +void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_wakeup_cb(struct dasd_ccw_req *, void *); struct dasd_device *dasd_alloc_device(void); @@ -727,6 +755,7 @@ void dasd_schedule_block_bh(struct dasd_block *); int dasd_sleep_on(struct dasd_ccw_req *); int dasd_sleep_on_queue(struct list_head *); int dasd_sleep_on_immediatly(struct dasd_ccw_req *); +int dasd_sleep_on_queue_interruptible(struct list_head *); int dasd_sleep_on_interruptible(struct dasd_ccw_req *); void dasd_device_set_timer(struct dasd_device *, int); void dasd_device_clear_timer(struct dasd_device *); @@ -750,6 +779,8 @@ int dasd_generic_restore_device(struct ccw_device *); enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); void dasd_generic_path_event(struct ccw_device *, int *); int dasd_generic_verify_path(struct dasd_device *, __u8); +void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *); +void dasd_generic_space_avail(struct dasd_device *); int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); char *dasd_get_sense(struct irb *); diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 8e26001dc11c..9a5f3add325f 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ 
b/drivers/s390/block/dasd_ioctl.c @@ -333,6 +333,59 @@ out_err: return rc; } +static int dasd_release_space(struct dasd_device *device, + struct format_data_t *rdata) +{ + if (!device->discipline->is_ese && !device->discipline->is_ese(device)) + return -ENOTSUPP; + if (!device->discipline->release_space) + return -ENOTSUPP; + + return device->discipline->release_space(device, rdata); +} + +/* + * Release allocated space + */ +static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp) +{ + struct format_data_t rdata; + struct dasd_device *base; + int rc = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!argp) + return -EINVAL; + + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + if (base->features & DASD_FEATURE_READONLY || + test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { + rc = -EROFS; + goto out_err; + } + if (bdev != bdev->bd_contains) { + pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n", + dev_name(&base->cdev->dev)); + rc = -EINVAL; + goto out_err; + } + + if (copy_from_user(&rdata, argp, sizeof(rdata))) { + rc = -EFAULT; + goto out_err; + } + + rc = dasd_release_space(base, &rdata); + +out_err: + dasd_put_device(base); + + return rc; +} + #ifdef CONFIG_DASD_PROFILE /* * Reset device profile information @@ -595,6 +648,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, case BIODASDREADALLCMB: rc = dasd_ioctl_readall_cmb(block, cmd, argp); break; + case BIODASDRAS: + rc = dasd_ioctl_release_space(bdev, argp); + break; default: /* if the discipline has an ioctl method try it. */ rc = -ENOTTY; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 6c90aa725f23..e71992a3c55f 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -41,7 +41,6 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); sclp.has_sipl = !!(sccb->cbl & 0x02); - sclp.has_sipl_g2 = !!(sccb->cbl & 0x04); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index b7902b643ec8..a76b8a8bcbbb 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -208,7 +208,6 @@ static inline int ap_query_configuration(struct ap_config_info *info) return -EINVAL; return ap_qci(info); } -EXPORT_SYMBOL(ap_query_configuration); /** * ap_init_configuration(): Allocate and query configuration array. diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 7e85ba7c6ef0..0604b49a4d32 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -115,7 +115,6 @@ static void vfio_ap_wait_for_irqclear(int apqn) * Unregisters the ISC in the GIB when the saved ISC not invalid. * Unpin the guest's page holding the NIB when it exist. * Reset the saved_pfn and saved_isc to invalid values. - * Clear the pointer to the matrix mediated device. * */ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) @@ -127,7 +126,6 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) &q->saved_pfn, 1); q->saved_pfn = 0; q->saved_isc = VFIO_AP_ISC_INVALID; - q->matrix_mdev = NULL; } /** @@ -179,6 +177,7 @@ struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) status.response_code); end_free: vfio_ap_free_aqic_resources(q); + q->matrix_mdev = NULL; return status; } |
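The new BIODASDRAS case above is the explicit counterpart to the discard path: it copies a format_data_t from user space and hands it to dasd_release_space(), which releases either the whole volume (intensity DASD_FMT_INT_ESE_FULL) or the given track range (intensity 0). A minimal user-space sketch, with a placeholder device node and an arbitrary track range; note that the ioctl requires CAP_SYS_ADMIN, refuses read-only devices and must be issued against the whole DASD rather than a partition:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>		/* BIODASDRAS, format_data_t, DASD_FMT_INT_ESE_FULL */

int main(void)
{
	const char *dev = "/dev/dasdb";		/* placeholder, whole device node */
	struct format_data_t rdata;
	int fd, rc;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rdata, 0, sizeof(rdata));
	rdata.start_unit = 0;			/* first track to release */
	rdata.stop_unit = 149;			/* last track to release */
	rdata.intensity = 0;			/* 0 = release the track range above */
	/* rdata.intensity = DASD_FMT_INT_ESE_FULL; would release the whole volume */

	rc = ioctl(fd, BIODASDRAS, &rdata);
	if (rc)
		perror("BIODASDRAS");
	close(fd);
	return rc ? 1 : 0;
}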