Diffstat (limited to 'drivers/char')
-rw-r--r--   drivers/char/Kconfig                |  11
-rw-r--r--   drivers/char/Makefile               |   2
-rw-r--r--   drivers/char/hw_random/n2-drv.c     |  29
-rw-r--r--   drivers/char/hw_random/n2rng.h      |   2
-rw-r--r--   drivers/char/ipmi/ipmi_watchdog.c   |   2
-rw-r--r--   drivers/char/msm_smd_pkt.c          |   5
-rw-r--r--   drivers/char/mspec.c                |   2
-rw-r--r--   drivers/char/ramoops.c              | 101
-rw-r--r--   drivers/char/random.c               | 349
-rw-r--r--   drivers/char/tile-srom.c            | 481
-rw-r--r--   drivers/char/tpm/tpm.c              | 102
-rw-r--r--   drivers/char/tpm/tpm.h              |   7
-rw-r--r--   drivers/char/tpm/tpm_nsc.c          |  14
-rw-r--r--   drivers/char/tpm/tpm_tis.c          | 179
14 files changed, 853 insertions, 433 deletions
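
Among the changes below, drivers/char/ramoops.c now takes its dump record size from platform data and, when no platform device is registered, builds that data from module parameters at init time. The following is a minimal board-file sketch of supplying such platform data; the "ramoops" device name and the address/size values are illustrative assumptions, not taken from this patch.

/*
 * Hypothetical board-file sketch: handing ramoops its memory window
 * via platform data. Address, sizes and the device name "ramoops"
 * are assumptions for illustration only.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ramoops.h>

static struct ramoops_platform_data board_ramoops_data = {
	.mem_address	= 0x8000000,	/* reserved RAM, board specific */
	.mem_size	= 128 * 1024,	/* must be >= record_size */
	.record_size	= 4096,		/* one dump slot per oops/panic */
	.dump_oops	= 1,		/* capture oopses, not only panics */
};

static struct platform_device board_ramoops_device = {
	.name	= "ramoops",		/* assumed to match the driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &board_ramoops_data,
	},
};

static int __init board_ramoops_register(void)
{
	/* ramoops_probe() validates and rounds the sizes on bind. */
	return platform_device_register(&board_ramoops_device);
}
device_initcall(board_ramoops_register);

When no such device exists, loading the module with mem_address=, mem_size=, record_size= and dump_oops= parameters reaches the same probe path through the platform_create_bundle() fallback added in this patch.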
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 49502bc5360a..423fd56bf612 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -616,5 +616,16 @@ config MSM_SMD_PKT Enables userspace clients to read and write to some packet SMD ports via device interface for MSM chipset. +config TILE_SROM + bool "Character-device access via hypervisor to the Tilera SPI ROM" + depends on TILE + default y + ---help--- + This device provides character-level read-write access + to the SROM, typically via the "0", "1", and "2" devices + in /dev/srom/. The Tilera hypervisor makes the flash + device appear much like a simple EEPROM, and knows + how to partition a single ROM for multiple purposes. + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7a00672bd85d..32762ba769c2 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o + +obj-$(CONFIG_TILE_SROM) += tile-srom.o diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index ac6739e085e3..c3de70de00d4 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -1,6 +1,6 @@ /* n2-drv.c: Niagara-2 RNG driver. * - * Copyright (C) 2008 David S. Miller <davem@davemloft.net> + * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> @@ -22,8 +22,8 @@ #define DRV_MODULE_NAME "n2rng" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "0.1" -#define DRV_MODULE_RELDATE "May 15, 2008" +#define DRV_MODULE_VERSION "0.2" +#define DRV_MODULE_RELDATE "July 27, 2011" static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { const struct of_device_id *match; - int victoria_falls; + int multi_capable; int err = -ENOMEM; struct n2rng *np; match = of_match_device(n2rng_match, &op->dev); if (!match) return -EINVAL; - victoria_falls = (match->data != NULL); + multi_capable = (match->data != NULL); n2rng_driver_version(); np = kzalloc(sizeof(*np), GFP_KERNEL); @@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op) INIT_DELAYED_WORK(&np->work, n2rng_work); - if (victoria_falls) - np->flags |= N2RNG_FLAG_VF; + if (multi_capable) + np->flags |= N2RNG_FLAG_MULTI; err = -ENODEV; np->hvapi_major = 2; @@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op) } } - if (np->flags & N2RNG_FLAG_VF) { + if (np->flags & N2RNG_FLAG_MULTI) { if (np->hvapi_major < 2) { - dev_err(&op->dev, "VF RNG requires HVAPI major " - "version 2 or later, got %lu\n", + dev_err(&op->dev, "multi-unit-capable RNG requires " + "HVAPI major version 2 or later, got %lu\n", np->hvapi_major); goto out_hvapi_unregister; } @@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op) goto out_free_units; dev_info(&op->dev, "Found %s RNG, units: %d\n", - ((np->flags & N2RNG_FLAG_VF) ? - "Victoria Falls" : "Niagara2"), + ((np->flags & N2RNG_FLAG_MULTI) ? 
+ "multi-unit-capable" : "single-unit"), np->num_units); np->hwrng.name = "n2rng"; @@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = { .compatible = "SUNW,vf-rng", .data = (void *) 1, }, + { + .name = "random-number-generator", + .compatible = "SUNW,kt-rng", + .data = (void *) 1, + }, {}, }; MODULE_DEVICE_TABLE(of, n2rng_match); diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h index 4bea07f30978..f244ac89087f 100644 --- a/drivers/char/hw_random/n2rng.h +++ b/drivers/char/hw_random/n2rng.h @@ -68,7 +68,7 @@ struct n2rng { struct platform_device *op; unsigned long flags; -#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */ +#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */ #define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */ #define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */ #define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */ diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 320668f4c3aa..3302586655c4 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -52,7 +52,7 @@ #include <linux/string.h> #include <linux/ctype.h> #include <linux/delay.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #ifdef CONFIG_X86 /* diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index b6f8a65c9960..8eca55deb3a3 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c @@ -379,9 +379,8 @@ static int __init smd_pkt_init(void) for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL); - if (IS_ERR(smd_pkt_devp[i])) { - r = PTR_ERR(smd_pkt_devp[i]); - pr_err("kmalloc() failed %d\n", r); + if (!smd_pkt_devp[i]) { + pr_err("kmalloc() failed\n"); goto clean_cdevs; } diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 25d139c9dbed..5c0d96a820fa 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -46,7 +46,7 @@ #include <asm/page.h> #include <asm/system.h> #include <asm/pgtable.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/tlbflush.h> #include <asm/uncached.h> #include <asm/sn/addrs.h> diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c index 1a9f5f6d6ac5..810aff9e750f 100644 --- a/drivers/char/ramoops.c +++ b/drivers/char/ramoops.c @@ -19,18 +19,26 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> +#include <linux/err.h> #include <linux/module.h> #include <linux/kmsg_dump.h> #include <linux/time.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/ramoops.h> #define RAMOOPS_KERNMSG_HDR "====" +#define MIN_MEM_SIZE 4096UL -#define RECORD_SIZE 4096UL +static ulong record_size = MIN_MEM_SIZE; +module_param(record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, + "size of each dump done on oops/panic"); static ulong mem_address; module_param(mem_address, ulong, 0400); @@ -52,10 +60,15 @@ static struct ramoops_context { void *virt_addr; phys_addr_t phys_addr; unsigned long size; + unsigned long record_size; + int dump_oops; int count; int max_count; } oops_cxt; +static struct platform_device *dummy; +static struct ramoops_platform_data *dummy_data; + static void ramoops_do_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, const char *s1, unsigned long l1, const char *s2, unsigned long l2) @@ -74,13 +87,13 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, 
return; /* Only dump oopses if dump_oops is set */ - if (reason == KMSG_DUMP_OOPS && !dump_oops) + if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops) return; - buf = cxt->virt_addr + (cxt->count * RECORD_SIZE); + buf = cxt->virt_addr + (cxt->count * cxt->record_size); buf_orig = buf; - memset(buf, '\0', RECORD_SIZE); + memset(buf, '\0', cxt->record_size); res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); buf += res; do_gettimeofday(×tamp); @@ -88,8 +101,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, buf += res; hdr_size = buf - buf_orig; - l2_cpy = min(l2, RECORD_SIZE - hdr_size); - l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy); + l2_cpy = min(l2, cxt->record_size - hdr_size); + l1_cpy = min(l1, cxt->record_size - hdr_size - l2_cpy); s2_start = l2 - l2_cpy; s1_start = l1 - l1_cpy; @@ -106,44 +119,59 @@ static int __init ramoops_probe(struct platform_device *pdev) struct ramoops_context *cxt = &oops_cxt; int err = -EINVAL; - if (pdata) { - mem_size = pdata->mem_size; - mem_address = pdata->mem_address; + if (!pdata->mem_size || !pdata->record_size) { + pr_err("The memory size and the record size must be " + "non-zero\n"); + goto fail3; } - if (!mem_size) { - printk(KERN_ERR "ramoops: invalid size specification"); + rounddown_pow_of_two(pdata->mem_size); + rounddown_pow_of_two(pdata->record_size); + + /* Check for the minimum memory size */ + if (pdata->mem_size < MIN_MEM_SIZE && + pdata->record_size < MIN_MEM_SIZE) { + pr_err("memory size too small, minium is %lu\n", MIN_MEM_SIZE); goto fail3; } - rounddown_pow_of_two(mem_size); - - if (mem_size < RECORD_SIZE) { - printk(KERN_ERR "ramoops: size too small"); + if (pdata->mem_size < pdata->record_size) { + pr_err("The memory size must be larger than the " + "records size\n"); goto fail3; } - cxt->max_count = mem_size / RECORD_SIZE; + cxt->max_count = pdata->mem_size / pdata->record_size; cxt->count = 0; - cxt->size = mem_size; - cxt->phys_addr = mem_address; + cxt->size = pdata->mem_size; + cxt->phys_addr = pdata->mem_address; + cxt->record_size = pdata->record_size; + cxt->dump_oops = pdata->dump_oops; + /* + * Update the module parameter variables as well so they are visible + * through /sys/module/ramoops/parameters/ + */ + mem_size = pdata->mem_size; + mem_address = pdata->mem_address; + record_size = pdata->record_size; + dump_oops = pdata->dump_oops; if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { - printk(KERN_ERR "ramoops: request mem region failed"); + pr_err("request mem region failed\n"); err = -EINVAL; goto fail3; } cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size); if (!cxt->virt_addr) { - printk(KERN_ERR "ramoops: ioremap failed"); + pr_err("ioremap failed\n"); goto fail2; } cxt->dump.dump = ramoops_do_dump; err = kmsg_dump_register(&cxt->dump); if (err) { - printk(KERN_ERR "ramoops: registering kmsg dumper failed"); + pr_err("registering kmsg dumper failed\n"); goto fail1; } @@ -162,7 +190,7 @@ static int __exit ramoops_remove(struct platform_device *pdev) struct ramoops_context *cxt = &oops_cxt; if (kmsg_dump_unregister(&cxt->dump) < 0) - printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper"); + pr_warn("could not unregister kmsg_dumper\n"); iounmap(cxt->virt_addr); release_mem_region(cxt->phys_addr, cxt->size); @@ -179,12 +207,39 @@ static struct platform_driver ramoops_driver = { static int __init ramoops_init(void) { - return platform_driver_probe(&ramoops_driver, ramoops_probe); + int ret; + ret = platform_driver_probe(&ramoops_driver, ramoops_probe); + if (ret == -ENODEV) { + /* + 
* If we didn't find a platform device, we use module parameters + * building platform data on the fly. + */ + pr_info("platform device not found, using module parameters\n"); + dummy_data = kzalloc(sizeof(struct ramoops_platform_data), + GFP_KERNEL); + if (!dummy_data) + return -ENOMEM; + dummy_data->mem_size = mem_size; + dummy_data->mem_address = mem_address; + dummy_data->record_size = record_size; + dummy_data->dump_oops = dump_oops; + dummy = platform_create_bundle(&ramoops_driver, ramoops_probe, + NULL, 0, dummy_data, + sizeof(struct ramoops_platform_data)); + + if (IS_ERR(dummy)) + ret = PTR_ERR(dummy); + else + ret = 0; + } + + return ret; } static void __exit ramoops_exit(void) { platform_driver_unregister(&ramoops_driver); + kfree(dummy_data); } module_init(ramoops_init); diff --git a/drivers/char/random.c b/drivers/char/random.c index 729281961f22..c35a785005b0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1300,345 +1300,14 @@ ctl_table random_table[] = { }; #endif /* CONFIG_SYSCTL */ -/******************************************************************** - * - * Random functions for networking - * - ********************************************************************/ - -/* - * TCP initial sequence number picking. This uses the random number - * generator to pick an initial secret value. This value is hashed - * along with the TCP endpoint information to provide a unique - * starting point for each pair of TCP endpoints. This defeats - * attacks which rely on guessing the initial TCP sequence number. - * This algorithm was suggested by Steve Bellovin. - * - * Using a very strong hash was taking an appreciable amount of the total - * TCP connection establishment time, so this is a weaker hash, - * compensated for by changing the secret periodically. - */ - -/* F, G and H are basic MD4 functions: selection, majority, parity */ -#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) -#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) -#define H(x, y, z) ((x) ^ (y) ^ (z)) - -/* - * The generic round function. The application is so specific that - * we don't bother protecting all the arguments with parens, as is generally - * good macro practice, in favor of extra legibility. 
- * Rotation is separate from addition to prevent recomputation - */ -#define ROUND(f, a, b, c, d, x, s) \ - (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) -#define K1 0 -#define K2 013240474631UL -#define K3 015666365641UL - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - -static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) -{ - __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; - - /* Round 1 */ - ROUND(F, a, b, c, d, in[ 0] + K1, 3); - ROUND(F, d, a, b, c, in[ 1] + K1, 7); - ROUND(F, c, d, a, b, in[ 2] + K1, 11); - ROUND(F, b, c, d, a, in[ 3] + K1, 19); - ROUND(F, a, b, c, d, in[ 4] + K1, 3); - ROUND(F, d, a, b, c, in[ 5] + K1, 7); - ROUND(F, c, d, a, b, in[ 6] + K1, 11); - ROUND(F, b, c, d, a, in[ 7] + K1, 19); - ROUND(F, a, b, c, d, in[ 8] + K1, 3); - ROUND(F, d, a, b, c, in[ 9] + K1, 7); - ROUND(F, c, d, a, b, in[10] + K1, 11); - ROUND(F, b, c, d, a, in[11] + K1, 19); - - /* Round 2 */ - ROUND(G, a, b, c, d, in[ 1] + K2, 3); - ROUND(G, d, a, b, c, in[ 3] + K2, 5); - ROUND(G, c, d, a, b, in[ 5] + K2, 9); - ROUND(G, b, c, d, a, in[ 7] + K2, 13); - ROUND(G, a, b, c, d, in[ 9] + K2, 3); - ROUND(G, d, a, b, c, in[11] + K2, 5); - ROUND(G, c, d, a, b, in[ 0] + K2, 9); - ROUND(G, b, c, d, a, in[ 2] + K2, 13); - ROUND(G, a, b, c, d, in[ 4] + K2, 3); - ROUND(G, d, a, b, c, in[ 6] + K2, 5); - ROUND(G, c, d, a, b, in[ 8] + K2, 9); - ROUND(G, b, c, d, a, in[10] + K2, 13); - - /* Round 3 */ - ROUND(H, a, b, c, d, in[ 3] + K3, 3); - ROUND(H, d, a, b, c, in[ 7] + K3, 9); - ROUND(H, c, d, a, b, in[11] + K3, 11); - ROUND(H, b, c, d, a, in[ 2] + K3, 15); - ROUND(H, a, b, c, d, in[ 6] + K3, 3); - ROUND(H, d, a, b, c, in[10] + K3, 9); - ROUND(H, c, d, a, b, in[ 1] + K3, 11); - ROUND(H, b, c, d, a, in[ 5] + K3, 15); - ROUND(H, a, b, c, d, in[ 9] + K3, 3); - ROUND(H, d, a, b, c, in[ 0] + K3, 9); - ROUND(H, c, d, a, b, in[ 4] + K3, 11); - ROUND(H, b, c, d, a, in[ 8] + K3, 15); - - return buf[1] + b; /* "most hashed" word */ - /* Alternative: return sum of all words? */ -} -#endif - -#undef ROUND -#undef F -#undef G -#undef H -#undef K1 -#undef K2 -#undef K3 - -/* This should not be decreased so low that ISNs wrap too fast. */ -#define REKEY_INTERVAL (300 * HZ) -/* - * Bit layout of the tcp sequence numbers (before adding current time): - * bit 24-31: increased after every key exchange - * bit 0-23: hash(source,dest) - * - * The implementation is similar to the algorithm described - * in the Appendix of RFC 1185, except that - * - it uses a 1 MHz clock instead of a 250 kHz clock - * - it performs a rekey every 5 minutes, which is equivalent - * to a (source,dest) tulple dependent forward jump of the - * clock by 0..2^(HASH_BITS+1) - * - * Thus the average ISN wraparound time is 68 minutes instead of - * 4.55 hours. - * - * SMP cleanup and lock avoidance with poor man's RCU. - * Manfred Spraul <manfred@colorfullife.com> - * - */ -#define COUNT_BITS 8 -#define COUNT_MASK ((1 << COUNT_BITS) - 1) -#define HASH_BITS 24 -#define HASH_MASK ((1 << HASH_BITS) - 1) +static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; -static struct keydata { - __u32 count; /* already shifted to the final position */ - __u32 secret[12]; -} ____cacheline_aligned ip_keydata[2]; - -static unsigned int ip_cnt; - -static void rekey_seq_generator(struct work_struct *work); - -static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); - -/* - * Lock avoidance: - * The ISN generation runs lockless - it's just a hash over random data. 
- * State changes happen every 5 minutes when the random key is replaced. - * Synchronization is performed by having two copies of the hash function - * state and rekey_seq_generator always updates the inactive copy. - * The copy is then activated by updating ip_cnt. - * The implementation breaks down if someone blocks the thread - * that processes SYN requests for more than 5 minutes. Should never - * happen, and even if that happens only a not perfectly compliant - * ISN is generated, nothing fatal. - */ -static void rekey_seq_generator(struct work_struct *work) +static int __init random_int_secret_init(void) { - struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; - - get_random_bytes(keyptr->secret, sizeof(keyptr->secret)); - keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; - smp_wmb(); - ip_cnt++; - schedule_delayed_work(&rekey_work, - round_jiffies_relative(REKEY_INTERVAL)); -} - -static inline struct keydata *get_keyptr(void) -{ - struct keydata *keyptr = &ip_keydata[ip_cnt & 1]; - - smp_rmb(); - - return keyptr; -} - -static __init int seqgen_init(void) -{ - rekey_seq_generator(NULL); + get_random_bytes(random_int_secret, sizeof(random_int_secret)); return 0; } -late_initcall(seqgen_init); - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, - __be16 sport, __be16 dport) -{ - __u32 seq; - __u32 hash[12]; - struct keydata *keyptr = get_keyptr(); - - /* The procedure is the same as for IPv4, but addresses are longer. - * Thus we must use twothirdsMD4Transform. - */ - - memcpy(hash, saddr, 16); - hash[4] = ((__force u16)sport << 16) + (__force u16)dport; - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); - - seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; - seq += keyptr->count; - - seq += ktime_to_ns(ktime_get_real()); - - return seq; -} -EXPORT_SYMBOL(secure_tcpv6_sequence_number); -#endif - -/* The code below is shamelessly stolen from secure_tcp_sequence_number(). - * All blames to Andrey V. Savochkin <saw@msu.ru>. - */ -__u32 secure_ip_id(__be32 daddr) -{ - struct keydata *keyptr; - __u32 hash[4]; - - keyptr = get_keyptr(); - - /* - * Pick a unique starting offset for each IP destination. - * The dest ip address is placed in the starting vector, - * which is then hashed with random data. - */ - hash[0] = (__force __u32)daddr; - hash[1] = keyptr->secret[9]; - hash[2] = keyptr->secret[10]; - hash[3] = keyptr->secret[11]; - - return half_md4_transform(hash, keyptr->secret); -} - -__u32 secure_ipv6_id(const __be32 daddr[4]) -{ - const struct keydata *keyptr; - __u32 hash[4]; - - keyptr = get_keyptr(); - - hash[0] = (__force __u32)daddr[0]; - hash[1] = (__force __u32)daddr[1]; - hash[2] = (__force __u32)daddr[2]; - hash[3] = (__force __u32)daddr[3]; - - return half_md4_transform(hash, keyptr->secret); -} - -#ifdef CONFIG_INET - -__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport) -{ - __u32 seq; - __u32 hash[4]; - struct keydata *keyptr = get_keyptr(); - - /* - * Pick a unique starting offset for each TCP connection endpoints - * (saddr, daddr, sport, dport). - * Note that the words are placed into the starting vector, which is - * then mixed with a partial MD4 over random data. 
- */ - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = keyptr->secret[11]; - - seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; - seq += keyptr->count; - /* - * As close as possible to RFC 793, which - * suggests using a 250 kHz clock. - * Further reading shows this assumes 2 Mb/s networks. - * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. - * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but - * we also need to limit the resolution so that the u32 seq - * overlaps less than one time per MSL (2 minutes). - * Choosing a clock of 64 ns period is OK. (period of 274 s) - */ - seq += ktime_to_ns(ktime_get_real()) >> 6; - - return seq; -} - -/* Generate secure starting point for ephemeral IPV4 transport port search */ -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) -{ - struct keydata *keyptr = get_keyptr(); - u32 hash[4]; - - /* - * Pick a unique starting offset for each ephemeral port search - * (saddr, daddr, dport) and 48bits of random data. - */ - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = (__force u32)dport ^ keyptr->secret[10]; - hash[3] = keyptr->secret[11]; - - return half_md4_transform(hash, keyptr->secret); -} -EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, - __be16 dport) -{ - struct keydata *keyptr = get_keyptr(); - u32 hash[12]; - - memcpy(hash, saddr, 16); - hash[4] = (__force u32)dport; - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); - - return twothirdsMD4Transform((const __u32 *)daddr, hash); -} -#endif - -#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) -/* Similar to secure_tcp_sequence_number but generate a 48 bit value - * bit's 32-47 increase every key exchange - * 0-31 hash(source, dest) - */ -u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport) -{ - u64 seq; - __u32 hash[4]; - struct keydata *keyptr = get_keyptr(); - - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = keyptr->secret[11]; - - seq = half_md4_transform(hash, keyptr->secret); - seq |= ((u64)keyptr->count) << (32 - HASH_BITS); - - seq += ktime_to_ns(ktime_get_real()); - seq &= (1ull << 48) - 1; - - return seq; -} -EXPORT_SYMBOL(secure_dccp_sequence_number); -#endif - -#endif /* CONFIG_INET */ - +late_initcall(random_int_secret_init); /* * Get a random word for internal kernel use only. Similar to urandom but @@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number); * value is not cryptographically secure but for several uses the cost of * depleting entropy is too high */ -DEFINE_PER_CPU(__u32 [4], get_random_int_hash); +DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); unsigned int get_random_int(void) { - struct keydata *keyptr; __u32 *hash = get_cpu_var(get_random_int_hash); - int ret; + unsigned int ret; - keyptr = get_keyptr(); hash[0] += current->pid + jiffies + get_cycles(); - - ret = half_md4_transform(hash, keyptr->secret); + md5_transform(hash, random_int_secret); + ret = hash[0]; put_cpu_var(get_random_int_hash); return ret; diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c new file mode 100644 index 000000000000..cf3ee008dca2 --- /dev/null +++ b/drivers/char/tile-srom.c @@ -0,0 +1,481 @@ +/* + * Copyright 2011 Tilera Corporation. 
All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * SPI Flash ROM driver + * + * This source code is derived from code provided in "Linux Device + * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and + * Greg Kroah-Hartman, published by O'Reilly Media, Inc. + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/kernel.h> /* printk() */ +#include <linux/slab.h> /* kmalloc() */ +#include <linux/fs.h> /* everything... */ +#include <linux/errno.h> /* error codes */ +#include <linux/types.h> /* size_t */ +#include <linux/proc_fs.h> +#include <linux/fcntl.h> /* O_ACCMODE */ +#include <linux/aio.h> +#include <linux/pagemap.h> +#include <linux/hugetlb.h> +#include <linux/uaccess.h> +#include <linux/platform_device.h> +#include <hv/hypervisor.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/delay.h> +#include <hv/drv_srom_intf.h> + +/* + * Size of our hypervisor I/O requests. We break up large transfers + * so that we don't spend large uninterrupted spans of time in the + * hypervisor. Erasing an SROM sector takes a significant fraction of + * a second, so if we allowed the user to, say, do one I/O to write the + * entire ROM, we'd get soft lockup timeouts, or worse. + */ +#define SROM_CHUNK_SIZE ((size_t)4096) + +/* + * When hypervisor is busy (e.g. erasing), poll the status periodically. + */ + +/* + * Interval to poll the state in msec + */ +#define SROM_WAIT_TRY_INTERVAL 20 + +/* + * Maximum times to poll the state + */ +#define SROM_MAX_WAIT_TRY_TIMES 1000 + +struct srom_dev { + int hv_devhdl; /* Handle for hypervisor device */ + u32 total_size; /* Size of this device */ + u32 sector_size; /* Size of a sector */ + u32 page_size; /* Size of a page */ + struct mutex lock; /* Allow only one accessor at a time */ +}; + +static int srom_major; /* Dynamic major by default */ +module_param(srom_major, int, 0); +MODULE_AUTHOR("Tilera Corporation"); +MODULE_LICENSE("GPL"); + +static int srom_devs; /* Number of SROM partitions */ +static struct cdev srom_cdev; +static struct class *srom_class; +static struct srom_dev *srom_devices; + +/* + * Handle calling the hypervisor and managing EAGAIN/EBUSY. 
+ */ + +static ssize_t _srom_read(int hv_devhdl, void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_read: error %d\n", retval); + return -EIO; + } +} + +static ssize_t _srom_write(int hv_devhdl, const void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_write: error %d\n", retval); + return -EIO; + } +} + +/** + * srom_open() - Device open routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_open(struct inode *inode, struct file *filp) +{ + filp->private_data = &srom_devices[iminor(inode)]; + return 0; +} + + +/** + * srom_release() - Device release routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_release(struct inode *inode, struct file *filp) +{ + struct srom_dev *srom = filp->private_data; + char dummy; + + /* Make sure we've flushed anything written to the ROM. */ + mutex_lock(&srom->lock); + if (srom->hv_devhdl >= 0) + _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1); + mutex_unlock(&srom->lock); + + filp->private_data = NULL; + + return 0; +} + + +/** + * srom_read() - Read data from the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes read, or an error code. + */ +static ssize_t srom_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + hv_retval = _srom_read(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval > 0) { + if (copy_to_user(buf, kernbuf, hv_retval) != 0) { + retval = -EFAULT; + break; + } + } else if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/** + * srom_write() - Write data to the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes written, or an error code. 
+ */ +static ssize_t srom_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) { + retval = -EFAULT; + break; + } + + hv_retval = _srom_write(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/* Provide our own implementation so we can use srom->total_size. */ +loff_t srom_llseek(struct file *filp, loff_t offset, int origin) +{ + struct srom_dev *srom = filp->private_data; + + if (mutex_lock_interruptible(&srom->lock)) + return -ERESTARTSYS; + + switch (origin) { + case SEEK_END: + offset += srom->total_size; + break; + case SEEK_CUR: + offset += filp->f_pos; + break; + } + + if (offset < 0 || offset > srom->total_size) { + offset = -EINVAL; + } else { + filp->f_pos = offset; + filp->f_version = 0; + } + + mutex_unlock(&srom->lock); + + return offset; +} + +static ssize_t total_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->total_size); +} + +static ssize_t sector_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->sector_size); +} + +static ssize_t page_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->page_size); +} + +static struct device_attribute srom_dev_attrs[] = { + __ATTR(total_size, S_IRUGO, total_show, NULL), + __ATTR(sector_size, S_IRUGO, sector_show, NULL), + __ATTR(page_size, S_IRUGO, page_show, NULL), + __ATTR_NULL +}; + +static char *srom_devnode(struct device *dev, mode_t *mode) +{ + *mode = S_IRUGO | S_IWUSR; + return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); +} + +/* + * The fops + */ +static const struct file_operations srom_fops = { + .owner = THIS_MODULE, + .llseek = srom_llseek, + .read = srom_read, + .write = srom_write, + .open = srom_open, + .release = srom_release, +}; + +/** + * srom_setup_minor() - Initialize per-minor information. + * @srom: Per-device SROM state. + * @index: Device to set up. + */ +static int srom_setup_minor(struct srom_dev *srom, int index) +{ + struct device *dev; + int devhdl = srom->hv_devhdl; + + mutex_init(&srom->lock); + + if (_srom_read(devhdl, &srom->total_size, + SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->sector_size, + SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->page_size, + SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0) + return -EIO; + + dev = device_create(srom_class, &platform_bus, + MKDEV(srom_major, index), srom, "%d", index); + return IS_ERR(dev) ? PTR_ERR(dev) : 0; +} + +/** srom_init() - Initialize the driver's module. 
*/ +static int srom_init(void) +{ + int result, i; + dev_t dev = MKDEV(srom_major, 0); + + /* + * Start with a plausible number of partitions; the krealloc() call + * below will yield about log(srom_devs) additional allocations. + */ + srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL); + + /* Discover the number of srom partitions. */ + for (i = 0; ; i++) { + int devhdl; + char buf[20]; + struct srom_dev *new_srom_devices = + krealloc(srom_devices, (i+1) * sizeof(struct srom_dev), + GFP_KERNEL | __GFP_ZERO); + if (!new_srom_devices) { + result = -ENOMEM; + goto fail_mem; + } + srom_devices = new_srom_devices; + sprintf(buf, "srom/0/%d", i); + devhdl = hv_dev_open((HV_VirtAddr)buf, 0); + if (devhdl < 0) { + if (devhdl != HV_ENODEV) + pr_notice("srom/%d: hv_dev_open failed: %d.\n", + i, devhdl); + break; + } + srom_devices[i].hv_devhdl = devhdl; + } + srom_devs = i; + + /* Bail out early if we have no partitions at all. */ + if (srom_devs == 0) { + result = -ENODEV; + goto fail_mem; + } + + /* Register our major, and accept a dynamic number. */ + if (srom_major) + result = register_chrdev_region(dev, srom_devs, "srom"); + else { + result = alloc_chrdev_region(&dev, 0, srom_devs, "srom"); + srom_major = MAJOR(dev); + } + if (result < 0) + goto fail_mem; + + /* Register a character device. */ + cdev_init(&srom_cdev, &srom_fops); + srom_cdev.owner = THIS_MODULE; + srom_cdev.ops = &srom_fops; + result = cdev_add(&srom_cdev, dev, srom_devs); + if (result < 0) + goto fail_chrdev; + + /* Create a sysfs class. */ + srom_class = class_create(THIS_MODULE, "srom"); + if (IS_ERR(srom_class)) { + result = PTR_ERR(srom_class); + goto fail_cdev; + } + srom_class->dev_attrs = srom_dev_attrs; + srom_class->devnode = srom_devnode; + + /* Do per-partition initialization */ + for (i = 0; i < srom_devs; i++) { + result = srom_setup_minor(srom_devices + i, i); + if (result < 0) + goto fail_class; + } + + return 0; + +fail_class: + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); +fail_cdev: + cdev_del(&srom_cdev); +fail_chrdev: + unregister_chrdev_region(dev, srom_devs); +fail_mem: + kfree(srom_devices); + return result; +} + +/** srom_cleanup() - Clean up the driver's module. 
*/ +static void srom_cleanup(void) +{ + int i; + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); + cdev_del(&srom_cdev); + unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs); + kfree(srom_devices); +} + +module_init(srom_init); +module_exit(srom_cleanup); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 7beb0e25f1e1..caf8012ef47c 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -534,6 +534,7 @@ void tpm_get_timeouts(struct tpm_chip *chip) struct duration_t *duration_cap; ssize_t rc; u32 timeout; + unsigned int scale = 1; tpm_cmd.header.in = tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; @@ -545,24 +546,30 @@ void tpm_get_timeouts(struct tpm_chip *chip) if (rc) goto duration; - if (be32_to_cpu(tpm_cmd.header.out.length) - != 4 * sizeof(u32)) - goto duration; + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || + be32_to_cpu(tpm_cmd.header.out.length) + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32)) + return; timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; /* Don't overwrite default if value is 0 */ timeout = be32_to_cpu(timeout_cap->a); + if (timeout && timeout < 1000) { + /* timeouts in msec rather usec */ + scale = 1000; + chip->vendor.timeout_adjusted = true; + } if (timeout) - chip->vendor.timeout_a = usecs_to_jiffies(timeout); + chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale); timeout = be32_to_cpu(timeout_cap->b); if (timeout) - chip->vendor.timeout_b = usecs_to_jiffies(timeout); + chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale); timeout = be32_to_cpu(timeout_cap->c); if (timeout) - chip->vendor.timeout_c = usecs_to_jiffies(timeout); + chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale); timeout = be32_to_cpu(timeout_cap->d); if (timeout) - chip->vendor.timeout_d = usecs_to_jiffies(timeout); + chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale); duration: tpm_cmd.header.in = tpm_getcap_header; @@ -575,23 +582,31 @@ duration: if (rc) return; - if (be32_to_cpu(tpm_cmd.header.out.return_code) - != 3 * sizeof(u32)) + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || + be32_to_cpu(tpm_cmd.header.out.length) + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) return; + duration_cap = &tpm_cmd.params.getcap_out.cap.duration; chip->vendor.duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); + chip->vendor.duration[TPM_MEDIUM] = + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); + chip->vendor.duration[TPM_LONG] = + usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); + /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above * value wrong and apparently reports msecs rather than usecs. So we * fix up the resulting too-small TPM_SHORT value to make things work. + * We also scale the TPM_MEDIUM and -_LONG values by 1000. 
*/ - if (chip->vendor.duration[TPM_SHORT] < (HZ/100)) + if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) { chip->vendor.duration[TPM_SHORT] = HZ; - - chip->vendor.duration[TPM_MEDIUM] = - usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium)); - chip->vendor.duration[TPM_LONG] = - usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long)); + chip->vendor.duration[TPM_MEDIUM] *= 1000; + chip->vendor.duration[TPM_LONG] *= 1000; + chip->vendor.duration_adjusted = true; + dev_info(chip->dev, "Adjusting TPM timeout parameters."); + } } EXPORT_SYMBOL_GPL(tpm_get_timeouts); @@ -600,7 +615,7 @@ void tpm_continue_selftest(struct tpm_chip *chip) u8 data[] = { 0, 193, /* TPM_TAG_RQU_COMMAND */ 0, 0, 0, 10, /* length */ - 0, 0, 0, 83, /* TPM_ORD_GetCapability */ + 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */ }; tpm_transmit(chip, data, sizeof(data)); @@ -863,18 +878,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, data = tpm_cmd.params.readpubek_out_buffer; str += sprintf(str, - "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n" - "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X" - " %02X %02X %02X %02X %02X %02X %02X %02X\n" - "Modulus length: %d\nModulus: \n", - data[10], data[11], data[12], data[13], data[14], - data[15], data[16], data[17], data[22], data[23], - data[24], data[25], data[26], data[27], data[28], - data[29], data[30], data[31], data[32], data[33], - be32_to_cpu(*((__be32 *) (data + 34)))); + "Algorithm: %02X %02X %02X %02X\n" + "Encscheme: %02X %02X\n" + "Sigscheme: %02X %02X\n" + "Parameters: %02X %02X %02X %02X " + "%02X %02X %02X %02X " + "%02X %02X %02X %02X\n" + "Modulus length: %d\n" + "Modulus:\n", + data[0], data[1], data[2], data[3], + data[4], data[5], + data[6], data[7], + data[12], data[13], data[14], data[15], + data[16], data[17], data[18], data[19], + data[20], data[21], data[22], data[23], + be32_to_cpu(*((__be32 *) (data + 24)))); for (i = 0; i < 256; i++) { - str += sprintf(str, "%02X ", data[i + 38]); + str += sprintf(str, "%02X ", data[i + 28]); if ((i + 1) % 16 == 0) str += sprintf(str, "\n"); } @@ -937,6 +958,35 @@ ssize_t tpm_show_caps_1_2(struct device * dev, } EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); +ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d %d %d [%s]\n", + jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), + jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), + jiffies_to_usecs(chip->vendor.duration[TPM_LONG]), + chip->vendor.duration_adjusted + ? "adjusted" : "original"); +} +EXPORT_SYMBOL_GPL(tpm_show_durations); + +ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d %d %d %d [%s]\n", + jiffies_to_usecs(chip->vendor.timeout_a), + jiffies_to_usecs(chip->vendor.timeout_b), + jiffies_to_usecs(chip->vendor.timeout_c), + jiffies_to_usecs(chip->vendor.timeout_d), + chip->vendor.timeout_adjusted + ? 
"adjusted" : "original"); +} +EXPORT_SYMBOL_GPL(tpm_show_timeouts); + ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 72ddb031b69a..9c4163cfa3ce 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_temp_deactivated(struct device *, struct device_attribute *attr, char *); +extern ssize_t tpm_show_durations(struct device *, + struct device_attribute *attr, char *); +extern ssize_t tpm_show_timeouts(struct device *, + struct device_attribute *attr, char *); struct tpm_chip; @@ -67,6 +71,7 @@ struct tpm_vendor_specific { unsigned long base; /* TPM base address */ int irq; + int probed_irq; int region_size; int have_region; @@ -81,7 +86,9 @@ struct tpm_vendor_specific { struct list_head list; int locality; unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */ + bool timeout_adjusted; unsigned long duration[3]; /* jiffies */ + bool duration_adjusted; wait_queue_head_t read_queue; wait_queue_head_t int_queue; diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index a605cb7dd898..82facc9104c7 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -330,12 +330,12 @@ static int __init init_nsc(void) pdev->dev.driver = &nsc_drv.driver; pdev->dev.release = tpm_nsc_remove; - if ((rc = platform_device_register(pdev)) < 0) - goto err_free_dev; + if ((rc = platform_device_add(pdev)) < 0) + goto err_put_dev; if (request_region(base, 2, "tpm_nsc0") == NULL ) { rc = -EBUSY; - goto err_unreg_dev; + goto err_del_dev; } if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) { @@ -382,10 +382,10 @@ static int __init init_nsc(void) err_rel_reg: release_region(base, 2); -err_unreg_dev: - platform_device_unregister(pdev); -err_free_dev: - kfree(pdev); +err_del_dev: + platform_device_del(pdev); +err_put_dev: + platform_device_put(pdev); err_unreg_drv: platform_driver_unregister(&nsc_drv); return rc; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index dd21df55689d..3f4051a7c5a7 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -26,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/acpi.h> +#include <linux/freezer.h> #include "tpm.h" #define TPM_HEADER_SIZE 10 @@ -79,7 +80,7 @@ enum tis_defaults { static LIST_HEAD(tis_chips); static DEFINE_SPINLOCK(tis_lock); -#ifdef CONFIG_ACPI +#if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int is_itpm(struct pnp_dev *dev) { struct acpi_device *acpi = pnp_acpi_device(dev); @@ -93,7 +94,7 @@ static int is_itpm(struct pnp_dev *dev) return 0; } #else -static int is_itpm(struct pnp_dev *dev) +static inline int is_itpm(struct pnp_dev *dev) { return 0; } @@ -120,7 +121,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force) static int request_locality(struct tpm_chip *chip, int l) { - unsigned long stop; + unsigned long stop, timeout; long rc; if (check_locality(chip, l) >= 0) @@ -129,17 +130,25 @@ static int request_locality(struct tpm_chip *chip, int l) iowrite8(TPM_ACCESS_REQUEST_USE, chip->vendor.iobase + TPM_ACCESS(l)); + stop = jiffies + chip->vendor.timeout_a; + if (chip->vendor.irq) { +again: + timeout = stop - jiffies; + if ((long)timeout <= 0) + return -1; rc = wait_event_interruptible_timeout(chip->vendor.int_queue, (check_locality (chip, l) >= 0), - 
chip->vendor.timeout_a); + timeout); if (rc > 0) return l; - + if (rc == -ERESTARTSYS && freezing(current)) { + clear_thread_flag(TIF_SIGPENDING); + goto again; + } } else { /* wait for burstcount */ - stop = jiffies + chip->vendor.timeout_a; do { if (check_locality(chip, l) >= 0) return l; @@ -196,15 +205,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, if ((status & mask) == mask) return 0; + stop = jiffies + timeout; + if (chip->vendor.irq) { +again: + timeout = stop - jiffies; + if ((long)timeout <= 0) + return -ETIME; rc = wait_event_interruptible_timeout(*queue, ((tpm_tis_status (chip) & mask) == mask), timeout); if (rc > 0) return 0; + if (rc == -ERESTARTSYS && freezing(current)) { + clear_thread_flag(TIF_SIGPENDING); + goto again; + } } else { - stop = jiffies + timeout; do { msleep(TPM_TIMEOUT); status = tpm_tis_status(chip); @@ -288,11 +306,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) { int rc, status, burstcnt; size_t count = 0; - u32 ordinal; if (request_locality(chip, 0) < 0) return -EBUSY; @@ -327,8 +344,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) /* write last byte */ iowrite8(buf[count], - chip->vendor.iobase + - TPM_DATA_FIFO(chip->vendor.locality)); + chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.int_queue); status = tpm_tis_status(chip); @@ -337,6 +353,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) goto out_err; } + return 0; + +out_err: + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + return rc; +} + +/* + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc; + u32 ordinal; + + rc = tpm_tis_send_data(chip, buf, len); + if (rc < 0) + return rc; + /* go and do it */ iowrite8(TPM_STS_GO, chip->vendor.iobase + TPM_STS(chip->vendor.locality)); @@ -358,6 +396,47 @@ out_err: return rc; } +/* + * Early probing for iTPM with STS_DATA_EXPECT flaw. + * Try sending command without itpm flag set and if that + * fails, repeat with itpm flag set. 
+ */ +static int probe_itpm(struct tpm_chip *chip) +{ + int rc = 0; + u8 cmd_getticks[] = { + 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0xf1 + }; + size_t len = sizeof(cmd_getticks); + int rem_itpm = itpm; + + itpm = 0; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) + goto out; + + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + + itpm = 1; + + rc = tpm_tis_send_data(chip, cmd_getticks, len); + if (rc == 0) { + dev_info(chip->dev, "Detected an iTPM.\n"); + rc = 1; + } else + rc = -EFAULT; + +out: + itpm = rem_itpm; + tpm_tis_ready(chip); + release_locality(chip, chip->vendor.locality, 0); + + return rc; +} + static const struct file_operations tis_ops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -376,6 +455,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); static struct attribute *tis_attrs[] = { &dev_attr_pubek.attr, @@ -385,7 +466,9 @@ static struct attribute *tis_attrs[] = { &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, - &dev_attr_cancel.attr, NULL, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, NULL, }; static struct attribute_group tis_attr_grp = { @@ -416,7 +499,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id) if (interrupt == 0) return IRQ_NONE; - chip->vendor.irq = irq; + chip->vendor.probed_irq = irq; /* Clear interrupts handled with TPM_EOI */ iowrite32(interrupt, @@ -464,7 +547,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, resource_size_t len, unsigned int irq) { u32 vendor, intfcaps, intmask; - int rc, i; + int rc, i, irq_s, irq_e; struct tpm_chip *chip; if (!(chip = tpm_register_hardware(dev, &tpm_tis))) @@ -493,6 +576,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); + if (!itpm) { + itpm = probe_itpm(chip); + if (itpm < 0) { + rc = -ENODEV; + goto out_err; + } + } + if (itpm) dev_info(dev, "Intel iTPM workaround enabled\n"); @@ -522,6 +613,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, if (intfcaps & TPM_INTF_DATA_AVAIL_INT) dev_dbg(dev, "\tData Avail Int Support\n"); + /* get the timeouts before testing for irqs */ + tpm_get_timeouts(chip); + /* INTERRUPT Setup */ init_waitqueue_head(&chip->vendor.read_queue); init_waitqueue_head(&chip->vendor.int_queue); @@ -540,13 +634,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, if (interrupts) chip->vendor.irq = irq; if (interrupts && !chip->vendor.irq) { - chip->vendor.irq = + irq_s = ioread8(chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); + if (irq_s) { + irq_e = irq_s; + } else { + irq_s = 3; + irq_e = 15; + } - for (i = 3; i < 16 && chip->vendor.irq == 0; i++) { + for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) { iowrite8(i, chip->vendor.iobase + - TPM_INT_VECTOR(chip->vendor.locality)); + TPM_INT_VECTOR(chip->vendor.locality)); if (request_irq (i, tis_int_probe, IRQF_SHARED, chip->vendor.miscdev.name, chip) != 0) { @@ -568,9 +668,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); + 
chip->vendor.probed_irq = 0; + /* Generate Interrupts */ tpm_gen_interrupt(chip); + chip->vendor.irq = chip->vendor.probed_irq; + + /* free_irq will call into tis_int_probe; + clear all irqs we haven't seen while doing + tpm_gen_interrupt */ + iowrite32(ioread32 + (chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)), + chip->vendor.iobase + + TPM_INT_STATUS(chip->vendor.locality)); + /* Turn off */ iowrite32(intmask, chip->vendor.iobase + @@ -609,7 +722,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, list_add(&chip->vendor.list, &tis_chips); spin_unlock(&tis_lock); - tpm_get_timeouts(chip); tpm_continue_selftest(chip); return 0; @@ -619,6 +731,29 @@ out_err: tpm_remove_hardware(chip->dev); return rc; } + +static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) +{ + u32 intmask; + + /* reenable interrupts that device may have lost or + BIOS/firmware may have disabled */ + iowrite8(chip->vendor.irq, chip->vendor.iobase + + TPM_INT_VECTOR(chip->vendor.locality)); + + intmask = + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + + intmask |= TPM_INTF_CMD_READY_INT + | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT + | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; + + iowrite32(intmask, + chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); +} + + #ifdef CONFIG_PNP static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, const struct pnp_device_id *pnp_id) @@ -650,6 +785,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev) struct tpm_chip *chip = pnp_get_drvdata(dev); int ret; + if (chip->vendor.irq) + tpm_tis_reenable_interrupts(chip); + ret = tpm_pm_resume(&dev->dev); if (!ret) tpm_continue_selftest(chip); @@ -702,6 +840,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg) static int tpm_tis_resume(struct platform_device *dev) { + struct tpm_chip *chip = dev_get_drvdata(&dev->dev); + + if (chip->vendor.irq) + tpm_tis_reenable_interrupts(chip); + return tpm_pm_resume(&dev->dev); } static struct platform_driver tis_drv = { |
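
The new drivers/char/tile-srom.c driver above exposes each SROM partition as a character device (the Kconfig help text names them "0", "1" and "2" under /dev/srom/) with read, write and llseek support, plus total_size, sector_size and page_size sysfs attributes. Below is a minimal user-space sketch that dumps the first bytes of partition 0, assuming the /dev/srom/0 node described in that help text exists.

/*
 * Hypothetical user-space sketch: read the start of SROM partition 0.
 * The /dev/srom/0 path follows the Kconfig help text and the
 * srom_devnode() callback; nothing here is part of the patch itself.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/dev/srom/0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/srom/0");
		return 1;
	}

	/*
	 * The driver splits large transfers into 4 KB hypervisor requests,
	 * so a short read like this needs only a single hv_dev_pread().
	 */
	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}

	for (ssize_t i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");

	close(fd);
	return 0;
}

Writes take the same chunked path through the hypervisor, and srom_release() flushes any buffered data to the part when the file is closed.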