author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-07 19:25:37 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-07 19:25:37 -0800
commit     b5dd0c658c31b469ccff1b637e5124851e7a4a1c (patch)
tree       dc0b31a5aa62bb4e1fa653a4f176c2faae51f9e0 /lib
parent     610cd4eadec4f97acd25d3108b0e50d1362b3319 (diff)
parent     fe0436e10c8845aed24cad3a1c719efcd6e583eb (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
- some of the rest of MM
- various misc things
- dynamic-debug updates
- checkpatch
- some epoll speedups
- autofs
- rapidio
- lib/, lib/lzo/ updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (83 commits)
samples/mic/mpssd/mpssd.h: remove duplicate header
kernel/fork.c: remove duplicated include
include/linux/relay.h: fix percpu annotation in struct rchan
arch/nios2/mm/fault.c: remove duplicate include
unicore32: stop printing the virtual memory layout
MAINTAINERS: fix GTA02 entry and mark as orphan
mm: create the new vm_fault_t type
arm, s390, unicore32: remove oneliner wrappers for memblock_alloc()
arch: simplify several early memory allocations
openrisc: simplify pte_alloc_one_kernel()
sh: prefer memblock APIs returning virtual address
microblaze: prefer memblock API returning virtual address
powerpc: prefer memblock APIs returning virtual address
lib/lzo: separate lzo-rle from lzo
lib/lzo: implement run-length encoding
lib/lzo: fast 8-byte copy on arm64
lib/lzo: 64-bit CTZ on arm64
lib/lzo: tidy-up ifdefs
ipc/sem.c: replace kvmalloc/memset with kvzalloc and use struct_size
ipc: annotate implicit fall through
...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.ubsan                 14
-rw-r--r--  lib/assoc_array.c                  1
-rw-r--r--  lib/div64.c                        4
-rw-r--r--  lib/dynamic_debug.c               22
-rw-r--r--  lib/lzo/lzo1x_compress.c         130
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c   75
-rw-r--r--  lib/lzo/lzodefs.h                 21
-rw-r--r--  lib/test_firmware.c                9
-rw-r--r--  lib/test_ubsan.c                  11
-rw-r--r--  lib/vsprintf.c                     5
10 files changed, 211 insertions, 81 deletions
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 98fa559ebd80..a2ae4a8e4fa6 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -27,15 +27,19 @@ config UBSAN_SANITIZE_ALL
 	  Enabling this option will get kernel image size increased
 	  significantly.
 
-config UBSAN_ALIGNMENT
-	bool "Enable checking of pointers alignment"
+config UBSAN_NO_ALIGNMENT
+	bool "Disable checking of pointers alignment"
 	depends on UBSAN
-	default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
+	default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	help
-	  This option enables detection of unaligned memory accesses.
-	  Enabling this option on architectures that support unaligned
+	  This option disables the check of unaligned memory accesses.
+	  This option should be used when building allmodconfig.
+	  Disabling this option on architectures that support unaligned
 	  accesses may produce a lot of false positives.
 
+config UBSAN_ALIGNMENT
+	def_bool !UBSAN_NO_ALIGNMENT
+
 config TEST_UBSAN
 	tristate "Module for testing for undefined behavior detection"
 	depends on m && UBSAN
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59875eb278ea..edc3c14af41d 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1117,6 +1117,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
 					   index_key))
 				goto found_leaf;
 		}
+		/* fall through */
 	case assoc_array_walk_tree_empty:
 	case assoc_array_walk_found_wrong_shortcut:
 	default:
diff --git a/lib/div64.c b/lib/div64.c
index 01c8602bb6ff..ee146bb4c558 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 		quot = div_u64_rem(dividend, divisor, &rem32);
 		*remainder = rem32;
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
@@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
 	if (high == 0) {
 		quot = div_u64(dividend, divisor);
 	} else {
-		int n = 1 + fls(high);
+		int n = fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index dbf2b457e47e..7bdf98c37e91 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -847,17 +847,19 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 			     const char *name)
 {
 	struct ddebug_table *dt;
-	const char *new_name;
 
 	dt = kzalloc(sizeof(*dt), GFP_KERNEL);
-	if (dt == NULL)
-		return -ENOMEM;
-	new_name = kstrdup_const(name, GFP_KERNEL);
-	if (new_name == NULL) {
-		kfree(dt);
+	if (dt == NULL) {
+		pr_err("error adding module: %s\n", name);
 		return -ENOMEM;
 	}
-	dt->mod_name = new_name;
+	/*
+	 * For built-in modules, name lives in .rodata and is
+	 * immortal. For loaded modules, name points at the name[]
+	 * member of struct module, which lives at least as long as
+	 * this struct ddebug_table.
+	 */
+	dt->mod_name = name;
 	dt->num_ddebugs = n;
 	dt->ddebugs = tab;
 
@@ -868,7 +870,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	vpr_info("%u debug prints in module %s\n", n, dt->mod_name);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(ddebug_add_module);
 
 /* helper for ddebug_dyndbg_(boot|module)_param_cb */
 static int ddebug_dyndbg_param_cb(char *param, char *val,
@@ -913,7 +914,6 @@ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module)
 static void ddebug_table_free(struct ddebug_table *dt)
 {
 	list_del_init(&dt->link);
-	kfree_const(dt->mod_name);
 	kfree(dt);
 }
 
@@ -930,15 +930,15 @@ int ddebug_remove_module(const char *mod_name)
 
 	mutex_lock(&ddebug_lock);
 	list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
-		if (!strcmp(dt->mod_name, mod_name)) {
+		if (dt->mod_name == mod_name) {
 			ddebug_table_free(dt);
 			ret = 0;
+			break;
 		}
 	}
 	mutex_unlock(&ddebug_lock);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ddebug_remove_module);
 
 static void ddebug_remove_all_tables(void)
 {
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 236eb21167b5..4525fb094844 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -20,7 +20,8 @@
 static noinline size_t
 lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
 		    unsigned char *out, size_t *out_len,
-		    size_t ti, void *wrkmem)
+		    size_t ti, void *wrkmem, signed char *state_offset,
+		    const unsigned char bitstream_version)
 {
 	const unsigned char *ip;
 	unsigned char *op;
@@ -35,27 +36,85 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
 	ip += ti < 4 ? 4 - ti : 0;
 
 	for (;;) {
-		const unsigned char *m_pos;
+		const unsigned char *m_pos = NULL;
 		size_t t, m_len, m_off;
 		u32 dv;
+		u32 run_length = 0;
 literal:
 		ip += 1 + ((ip - ii) >> 5);
 next:
 		if (unlikely(ip >= ip_end))
 			break;
 		dv = get_unaligned_le32(ip);
-		t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
-		m_pos = in + dict[t];
-		dict[t] = (lzo_dict_t) (ip - in);
-		if (unlikely(dv != get_unaligned_le32(m_pos)))
-			goto literal;
+
+		if (dv == 0 && bitstream_version) {
+			const unsigned char *ir = ip + 4;
+			const unsigned char *limit = ip_end
+				< (ip + MAX_ZERO_RUN_LENGTH + 1)
+				? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
+			u64 dv64;
+
+			for (; (ir + 32) <= limit; ir += 32) {
+				dv64 = get_unaligned((u64 *)ir);
+				dv64 |= get_unaligned((u64 *)ir + 1);
+				dv64 |= get_unaligned((u64 *)ir + 2);
+				dv64 |= get_unaligned((u64 *)ir + 3);
+				if (dv64)
+					break;
+			}
+			for (; (ir + 8) <= limit; ir += 8) {
+				dv64 = get_unaligned((u64 *)ir);
+				if (dv64) {
+# if defined(__LITTLE_ENDIAN)
+					ir += __builtin_ctzll(dv64) >> 3;
+# elif defined(__BIG_ENDIAN)
+					ir += __builtin_clzll(dv64) >> 3;
+# else
+# error "missing endian definition"
+# endif
+					break;
+				}
+			}
+#else
+			while ((ir < (const unsigned char *)
+					ALIGN((uintptr_t)ir, 4)) &&
+					(ir < limit) && (*ir == 0))
+				ir++;
+			for (; (ir + 4) <= limit; ir += 4) {
+				dv = *((u32 *)ir);
+				if (dv) {
+# if defined(__LITTLE_ENDIAN)
+					ir += __builtin_ctz(dv) >> 3;
+# elif defined(__BIG_ENDIAN)
+					ir += __builtin_clz(dv) >> 3;
+# else
+# error "missing endian definition"
+# endif
+					break;
+				}
+			}
+#endif
+			while (likely(ir < limit) && unlikely(*ir == 0))
+				ir++;
+			run_length = ir - ip;
+			if (run_length > MAX_ZERO_RUN_LENGTH)
+				run_length = MAX_ZERO_RUN_LENGTH;
+		} else {
+			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+			m_pos = in + dict[t];
+			dict[t] = (lzo_dict_t) (ip - in);
+			if (unlikely(dv != get_unaligned_le32(m_pos)))
+				goto literal;
+		}
 
 		ii -= ti;
 		ti = 0;
 		t = ip - ii;
 		if (t != 0) {
 			if (t <= 3) {
-				op[-2] |= t;
+				op[*state_offset] |= t;
 				COPY4(op, ii);
 				op += t;
 			} else if (t <= 16) {
@@ -88,6 +147,17 @@ next:
 			}
 		}
 
+		if (unlikely(run_length)) {
+			ip += run_length;
+			run_length -= MIN_ZERO_RUN_LENGTH;
+			put_unaligned_le32((run_length << 21) | 0xfffc18
+					   | (run_length & 0x7), op);
+			op += 4;
+			run_length = 0;
+			*state_offset = -3;
+			goto finished_writing_instruction;
+		}
+
 		m_len = 4;
 		{
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
@@ -170,7 +240,6 @@ m_len_done:
 
 		m_off = ip - m_pos;
 		ip += m_len;
-		ii = ip;
 		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
 			m_off -= 1;
 			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
@@ -207,29 +276,45 @@ m_len_done:
 			*op++ = (m_off << 2);
 			*op++ = (m_off >> 6);
 		}
+		*state_offset = -2;
+finished_writing_instruction:
+		ii = ip;
 		goto next;
 	}
 	*out_len = op - out;
 	return in_end - (ii - ti);
 }
 
-int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 		     unsigned char *out, size_t *out_len,
-		     void *wrkmem)
+		     void *wrkmem, const unsigned char bitstream_version)
 {
 	const unsigned char *ip = in;
 	unsigned char *op = out;
 	size_t l = in_len;
 	size_t t = 0;
+	signed char state_offset = -2;
+	unsigned int m4_max_offset;
+
+	// LZO v0 will never write 17 as first byte,
+	// so this is used to version the bitstream
+	if (bitstream_version > 0) {
+		*op++ = 17;
+		*op++ = bitstream_version;
+		m4_max_offset = M4_MAX_OFFSET_V1;
+	} else {
+		m4_max_offset = M4_MAX_OFFSET_V0;
+	}
 
 	while (l > 20) {
-		size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
+		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
 		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
 			break;
 		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
 		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
-		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
+					&state_offset, bitstream_version);
 		ip += ll;
 		op += *out_len;
 		l  -= ll;
@@ -242,7 +327,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
 		if (op == out && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
-			op[-2] |= t;
+			op[state_offset] |= t;
 		} else if (t <= 18) {
 			*op++ = (t - 3);
 		} else {
@@ -273,7 +358,24 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
 	*out_len = op - out;
 	return LZO_E_OK;
 }
+
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+		     unsigned char *out, size_t *out_len,
+		     void *wrkmem)
+{
+	return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
+}
+
+int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
+		     unsigned char *out, size_t *out_len,
+		     void *wrkmem)
+{
+	return lzogeneric1x_1_compress(in, in_len, out, out_len,
+				       wrkmem, LZO_VERSION);
+}
+
 EXPORT_SYMBOL_GPL(lzo1x_1_compress);
+EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZO1X-1 Compressor");
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index a1c387f6afba..6d2600ea3b55 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -46,11 +46,23 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	const unsigned char * const ip_end = in + in_len;
 	unsigned char * const op_end = out + *out_len;
 
+	unsigned char bitstream_version;
+
 	op = out;
 	ip = in;
 
 	if (unlikely(in_len < 3))
 		goto input_overrun;
+
+	if (likely(*ip == 17)) {
+		bitstream_version = ip[1];
+		ip += 2;
+		if (unlikely(in_len < 5))
+			goto input_overrun;
+	} else {
+		bitstream_version = 0;
+	}
+
 	if (*ip > 17) {
 		t = *ip++ - 17;
 		if (t < 4) {
@@ -154,32 +166,49 @@ copy_literal_run:
 			m_pos -= next >> 2;
 			next &= 3;
 		} else {
-			m_pos = op;
-			m_pos -= (t & 8) << 11;
-			t = (t & 7) + (3 - 1);
-			if (unlikely(t == 2)) {
-				size_t offset;
-				const unsigned char *ip_last = ip;
+			NEED_IP(2);
+			next = get_unaligned_le16(ip);
+			if (((next & 0xfffc) == 0xfffc) &&
+			    ((t & 0xf8) == 0x18) &&
+			    likely(bitstream_version)) {
+				NEED_IP(3);
+				t &= 7;
+				t |= ip[2] << 3;
+				t += MIN_ZERO_RUN_LENGTH;
+				NEED_OP(t);
+				memset(op, 0, t);
+				op += t;
+				next &= 3;
+				ip += 3;
+				goto match_next;
+			} else {
+				m_pos = op;
+				m_pos -= (t & 8) << 11;
+				t = (t & 7) + (3 - 1);
+				if (unlikely(t == 2)) {
+					size_t offset;
+					const unsigned char *ip_last = ip;
 
-				while (unlikely(*ip == 0)) {
-					ip++;
-					NEED_IP(1);
-				}
-				offset = ip - ip_last;
-				if (unlikely(offset > MAX_255_COUNT))
-					return LZO_E_ERROR;
+					while (unlikely(*ip == 0)) {
+						ip++;
+						NEED_IP(1);
+					}
+					offset = ip - ip_last;
+					if (unlikely(offset > MAX_255_COUNT))
+						return LZO_E_ERROR;
 
-				offset = (offset << 8) - offset;
-				t += offset + 7 + *ip++;
-				NEED_IP(2);
+					offset = (offset << 8) - offset;
+					t += offset + 7 + *ip++;
+					NEED_IP(2);
+					next = get_unaligned_le16(ip);
+				}
+				ip += 2;
+				m_pos -= next >> 2;
+				next &= 3;
+				if (m_pos == op)
+					goto eof_found;
+				m_pos -= 0x4000;
 			}
-			next = get_unaligned_le16(ip);
-			ip += 2;
-			m_pos -= next >> 2;
-			next &= 3;
-			if (m_pos == op)
-				goto eof_found;
-			m_pos -= 0x4000;
 		}
 		TEST_LB(m_pos);
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h
index 4edefd2f540c..b60851fcf6ce 100644
--- a/lib/lzo/lzodefs.h
+++ b/lib/lzo/lzodefs.h
@@ -13,9 +13,15 @@
  */
 
+/* Version
+ * 0: original lzo version
+ * 1: lzo with support for RLE
+ */
+#define LZO_VERSION 1
+
 #define COPY4(dst, src)	\
 		put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
 #define COPY8(dst, src)	\
 		put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
 #else
@@ -25,19 +31,21 @@
 
 #if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
 #error "conflicting endian definitions"
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
 #define LZO_USE_CTZ64	1
 #define LZO_USE_CTZ32	1
-#elif defined(__i386__) || defined(__powerpc__)
+#define LZO_FAST_64BIT_MEMORY_ACCESS
+#elif defined(CONFIG_X86) || defined(CONFIG_PPC)
 #define LZO_USE_CTZ32	1
-#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#elif defined(CONFIG_ARM) && (__LINUX_ARM_ARCH__ >= 5)
 #define LZO_USE_CTZ32	1
 #endif
 
 #define M1_MAX_OFFSET	0x0400
 #define M2_MAX_OFFSET	0x0800
 #define M3_MAX_OFFSET	0x4000
-#define M4_MAX_OFFSET	0xbfff
+#define M4_MAX_OFFSET_V0	0xbfff
+#define M4_MAX_OFFSET_V1	0xbffe
 
 #define M1_MIN_LEN	2
 #define M1_MAX_LEN	2
@@ -53,6 +61,9 @@
 #define M3_MARKER	32
 #define M4_MARKER	16
 
+#define MIN_ZERO_RUN_LENGTH	4
+#define MAX_ZERO_RUN_LENGTH	(2047 + MIN_ZERO_RUN_LENGTH)
+
 #define lzo_dict_t	unsigned short
 #define D_BITS		13
 #define D_SIZE		(1u << D_BITS)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 7cab9a9869ac..7222093ee00b 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -631,11 +631,6 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
 
 	for (i = 0; i < test_fw_config->num_requests; i++) {
 		req = &test_fw_config->reqs[i];
-		if (!req) {
-			WARN_ON(1);
-			rc = -ENOMEM;
-			goto out_bail;
-		}
 		req->fw = NULL;
 		req->idx = i;
 		req->name = test_fw_config->name;
@@ -737,10 +732,6 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
 
 	for (i = 0; i < test_fw_config->num_requests; i++) {
 		req = &test_fw_config->reqs[i];
-		if (!req) {
-			WARN_ON(1);
-			goto out_bail;
-		}
 		req->name = test_fw_config->name;
 		req->fw = NULL;
 		req->idx = i;
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 280f4979d00e..9ea10adf7a66 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -42,14 +42,6 @@ static void test_ubsan_divrem_overflow(void)
 	val /= val2;
 }
 
-static void test_ubsan_vla_bound_not_positive(void)
-{
-	volatile int size = -1;
-	char buf[size];
-
-	(void)buf;
-}
-
 static void test_ubsan_shift_out_of_bounds(void)
 {
 	volatile int val = -1;
@@ -61,7 +53,7 @@ static void test_ubsan_shift_out_of_bounds(void)
 static void test_ubsan_out_of_bounds(void)
 {
 	volatile int i = 4, j = 5;
-	volatile int arr[i];
+	volatile int arr[4];
 
 	arr[j] = i;
 }
@@ -113,7 +105,6 @@ static const test_ubsan_fp test_ubsan_array[] = {
 	test_ubsan_mul_overflow,
 	test_ubsan_negate_overflow,
 	test_ubsan_divrem_overflow,
-	test_ubsan_vla_bound_not_positive,
 	test_ubsan_shift_out_of_bounds,
 	test_ubsan_out_of_bounds,
 	test_ubsan_load_invalid_value,
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3add92329bae..30b00de4f321 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/build_bug.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>	/* for KSYM_SYMBOL_LEN */
@@ -405,6 +406,8 @@ struct printf_spec {
 	unsigned int	base:8;		/* number base, 8, 10 or 16 only */
 	signed int	precision:16;	/* # of digits/chars */
 } __packed;
+static_assert(sizeof(struct printf_spec) == 8);
+
 #define FIELD_WIDTH_MAX ((1 << 23) - 1)
 #define PRECISION_MAX ((1 << 15) - 1)
 
@@ -422,8 +425,6 @@ char *number(char *buf, char *end, unsigned long long num,
 	int field_width = spec.field_width;
 	int precision = spec.precision;
 
-	BUILD_BUG_ON(sizeof(struct printf_spec) != 8);
-
 	/* locase = 0 or 0x20. ORing digits or letters with 'locase'
 	 * produces same digits or (maybe lowercased) letters */
 	locase = (spec.flags & SMALL);
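
A note on the new zero-run instruction, for readers of the lzo1x_compress.c and lzo1x_decompress_safe.c hunks above. The compressor folds a run of 4 to 2051 zero bytes into one 4-byte instruction, built as (run_length << 21) | 0xfffc18 | (run_length & 0x7) after MIN_ZERO_RUN_LENGTH has been subtracted from run_length; the decompressor takes the RLE path only when the stream announced a nonzero bitstream_version via the leading 17 marker byte and the bytes match ((next & 0xfffc) == 0xfffc) && ((t & 0xf8) == 0x18), so plain LZO1X data is parsed exactly as before. The sketch below is illustrative user-space code, not kernel code — the helper names are invented, and the two literal-state bits the compressor may later OR into the instruction (via the new state_offset of -3) are left zero here:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_ZERO_RUN_LENGTH 4
#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH)

/* Emit the 4-byte RLE instruction the way lzo1x_1_do_compress() does:
 * le32 = (stored << 21) | 0xfffc18 | (stored & 7), stored = run - 4,
 * which serialises little-endian to the bytes
 *   [0x18 | (stored & 7)] [0xfc] [0xff] [stored >> 3]
 */
static void encode_zero_run(uint32_t run_length, uint8_t op[4])
{
	uint32_t stored = run_length - MIN_ZERO_RUN_LENGTH; /* 0..2047 */
	uint32_t v = (stored << 21) | 0xfffc18 | (stored & 0x7);

	op[0] = v & 0xff;          /* instruction byte: 0x18 | low 3 bits */
	op[1] = (v >> 8) & 0xff;   /* always 0xfc; low 2 bits stay free for
				    * the next literal count (op[-3]) */
	op[2] = (v >> 16) & 0xff;  /* always 0xff */
	op[3] = v >> 24;           /* stored >> 3 */
}

/* Recover the run length the way lzo1x_decompress_safe() does; in the
 * kernel the first byte has already been consumed as t, so its ip[2]
 * corresponds to insn[3] here. */
static uint32_t decode_zero_run(const uint8_t *insn)
{
	uint32_t t = insn[0];
	uint32_t next = insn[1] | (insn[2] << 8);  /* get_unaligned_le16 */

	/* the guards the decompressor checks before taking the RLE path */
	assert((next & 0xfffc) == 0xfffc && (t & 0xf8) == 0x18);

	t &= 7;
	t |= insn[3] << 3;
	return t + MIN_ZERO_RUN_LENGTH;
}

int main(void)
{
	uint8_t insn[4];
	uint32_t run;

	for (run = MIN_ZERO_RUN_LENGTH; run <= MAX_ZERO_RUN_LENGTH; run++) {
		encode_zero_run(run, insn);
		assert(decode_zero_run(insn) == run);
	}
	puts("all zero-run lengths round-trip");
	return 0;
}

The second and third bytes of the instruction are fixed at 0xfc and 0xff, which is why the 11-bit length is split: the low three bits ride in the instruction byte and the high eight in the trailing byte.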
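
On the one-line div64.c change: div64_u64() and div64_u64_rem() handle a 64-bit divisor by shifting both operands right by n, estimating the quotient with a narrower divide, and then correcting the estimate by at most one. n = fls(high) shifts by exactly the number of significant bits in the divisor's upper half, so the shifted divisor still fills 32 bits; the old n = 1 + fls(high) discarded one more bit than necessary. A user-space sketch of the fixed routine, hedged accordingly — fls() is re-implemented here to mirror the kernel helper, and main() simply spot-checks against native 64-bit division:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* 1-based index of the highest set bit, mirroring the kernel's fls() */
static int fls(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Shape of div64_u64() after the fix: normalise so the divisor fits in
 * 32 bits, estimate, then nudge the estimate by at most one. In the
 * kernel this matters on 32-bit targets, where div_u64() only needs a
 * 64-by-32 divide; here the point is just the arithmetic. */
static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;   /* div_u64() in the kernel */
	} else {
		int n = fls(high);           /* was 1 + fls(high) */

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}
	return quot;
}

int main(void)
{
	const uint64_t cases[][2] = {
		{ 0xfffffffdffffffffULL, 0x100000001ULL },
		{ 0xffffffffffffffffULL, 0x8000000000000001ULL },
		{ 123456789012345678ULL, 0x300000007ULL },
	};
	unsigned i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		assert(div64_u64_sketch(cases[i][0], cases[i][1]) ==
		       cases[i][0] / cases[i][1]);
	puts("estimates corrected to exact quotients");
	return 0;
}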