From 6865dc3ae93b9acb336ca48bd7b2db3446d89370 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:15 -0700 Subject: x86/percpu: Introduce size abstraction macros In preparation for cleaning up the percpu operations, define macros for abstraction based on the width of the operation. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-2-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 2278797c769d..19838e4f7a8f 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -87,6 +87,36 @@ * don't give an lvalue though). */ extern void __bad_percpu_size(void); +#define __pcpu_type_1 u8 +#define __pcpu_type_2 u16 +#define __pcpu_type_4 u32 +#define __pcpu_type_8 u64 + +#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff)) +#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff)) +#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff)) +#define __pcpu_cast_8(val) ((u64)(val)) + +#define __pcpu_op1_1(op, dst) op "b " dst +#define __pcpu_op1_2(op, dst) op "w " dst +#define __pcpu_op1_4(op, dst) op "l " dst +#define __pcpu_op1_8(op, dst) op "q " dst + +#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst +#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst +#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst +#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst + +#define __pcpu_reg_1(mod, x) mod "q" (x) +#define __pcpu_reg_2(mod, x) mod "r" (x) +#define __pcpu_reg_4(mod, x) mod "r" (x) +#define __pcpu_reg_8(mod, x) mod "r" (x) + +#define __pcpu_reg_imm_1(x) "qi" (x) +#define __pcpu_reg_imm_2(x) "ri" (x) +#define __pcpu_reg_imm_4(x) "ri" (x) +#define __pcpu_reg_imm_8(x) "re" (x) + #define percpu_to_op(qual, op, var, val) \ do { \ typedef typeof(var) pto_T__; \ -- cgit v1.2.3-58-ga151 From c175acc14719e69ecec4dafbb642a7f38c76c064 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:16 -0700 Subject: x86/percpu: Clean up percpu_to_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. 
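As an illustration of the dispatch pattern these size macros enable (a minimal user-space sketch, not taken from the patch; every name below is invented for the demo): the caller passes the operand width as a literal token and ##size selects the matching type at preprocessing time, so only the variant for the requested width is ever instantiated — no dead arms are left for the compiler to diagnose.

	#include <stdint.h>
	#include <stdio.h>

	#define __demo_type_1 uint8_t
	#define __demo_type_4 uint32_t

	/* Store "val" into "dst" using the width given as a literal token. */
	#define demo_store(size, dst, val)					\
	do {									\
		__demo_type_##size demo_val__ = (__demo_type_##size)(val);	\
		*(__demo_type_##size *)(dst) = demo_val__;			\
	} while (0)

	int main(void)
	{
		uint8_t b;
		uint32_t l;

		demo_store(1, &b, 0x12);	/* expands with uint8_t */
		demo_store(4, &l, 0x12345678);	/* expands with uint32_t */
		printf("%x %x\n", b, l);
		return 0;
	}

This mirrors how the reworked percpu_to_op() below keys __pcpu_type_##size, __pcpu_op2_##size and __pcpu_reg_imm_##size off a single size argument supplied by the raw_cpu_*/this_cpu_* wrappers.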
Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-3-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 90 +++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 19838e4f7a8f..fb280fba94c5 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -117,37 +117,17 @@ extern void __bad_percpu_size(void); #define __pcpu_reg_imm_4(x) "ri" (x) #define __pcpu_reg_imm_8(x) "re" (x) -#define percpu_to_op(qual, op, var, val) \ -do { \ - typedef typeof(var) pto_T__; \ - if (0) { \ - pto_T__ pto_tmp__; \ - pto_tmp__ = (val); \ - (void)pto_tmp__; \ - } \ - switch (sizeof(var)) { \ - case 1: \ - asm qual (op "b %1,"__percpu_arg(0) \ - : "+m" (var) \ - : "qi" ((pto_T__)(val))); \ - break; \ - case 2: \ - asm qual (op "w %1,"__percpu_arg(0) \ - : "+m" (var) \ - : "ri" ((pto_T__)(val))); \ - break; \ - case 4: \ - asm qual (op "l %1,"__percpu_arg(0) \ - : "+m" (var) \ - : "ri" ((pto_T__)(val))); \ - break; \ - case 8: \ - asm qual (op "q %1,"__percpu_arg(0) \ - : "+m" (var) \ - : "re" ((pto_T__)(val))); \ - break; \ - default: __bad_percpu_size(); \ - } \ +#define percpu_to_op(size, qual, op, _var, _val) \ +do { \ + __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ + if (0) { \ + typeof(_var) pto_tmp__; \ + pto_tmp__ = (_val); \ + (void)pto_tmp__; \ + } \ + asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \ + : [var] "+m" (_var) \ + : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) /* @@ -425,18 +405,18 @@ do { \ #define raw_cpu_read_2(pcp) percpu_from_op(, "mov", pcp) #define raw_cpu_read_4(pcp) percpu_from_op(, "mov", pcp) -#define raw_cpu_write_1(pcp, val) percpu_to_op(, "mov", (pcp), val) -#define raw_cpu_write_2(pcp, val) percpu_to_op(, "mov", (pcp), val) -#define raw_cpu_write_4(pcp, val) percpu_to_op(, "mov", (pcp), val) +#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val) +#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val) +#define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val) #define raw_cpu_add_1(pcp, val) percpu_add_op(, (pcp), val) #define raw_cpu_add_2(pcp, val) percpu_add_op(, (pcp), val) #define raw_cpu_add_4(pcp, val) percpu_add_op(, (pcp), val) -#define raw_cpu_and_1(pcp, val) percpu_to_op(, "and", (pcp), val) -#define raw_cpu_and_2(pcp, val) percpu_to_op(, "and", (pcp), val) -#define raw_cpu_and_4(pcp, val) percpu_to_op(, "and", (pcp), val) -#define raw_cpu_or_1(pcp, val) percpu_to_op(, "or", (pcp), val) -#define raw_cpu_or_2(pcp, val) percpu_to_op(, "or", (pcp), val) -#define raw_cpu_or_4(pcp, val) percpu_to_op(, "or", (pcp), val) +#define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val) +#define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val) +#define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val) +#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val) +#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val) +#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val) /* * raw_cpu_xchg() can use a load-store since it is not required to be @@ -456,18 +436,18 @@ do { \ #define this_cpu_read_1(pcp) 
percpu_from_op(volatile, "mov", pcp) #define this_cpu_read_2(pcp) percpu_from_op(volatile, "mov", pcp) #define this_cpu_read_4(pcp) percpu_from_op(volatile, "mov", pcp) -#define this_cpu_write_1(pcp, val) percpu_to_op(volatile, "mov", (pcp), val) -#define this_cpu_write_2(pcp, val) percpu_to_op(volatile, "mov", (pcp), val) -#define this_cpu_write_4(pcp, val) percpu_to_op(volatile, "mov", (pcp), val) +#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val) +#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val) +#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val) #define this_cpu_add_1(pcp, val) percpu_add_op(volatile, (pcp), val) #define this_cpu_add_2(pcp, val) percpu_add_op(volatile, (pcp), val) #define this_cpu_add_4(pcp, val) percpu_add_op(volatile, (pcp), val) -#define this_cpu_and_1(pcp, val) percpu_to_op(volatile, "and", (pcp), val) -#define this_cpu_and_2(pcp, val) percpu_to_op(volatile, "and", (pcp), val) -#define this_cpu_and_4(pcp, val) percpu_to_op(volatile, "and", (pcp), val) -#define this_cpu_or_1(pcp, val) percpu_to_op(volatile, "or", (pcp), val) -#define this_cpu_or_2(pcp, val) percpu_to_op(volatile, "or", (pcp), val) -#define this_cpu_or_4(pcp, val) percpu_to_op(volatile, "or", (pcp), val) +#define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val) +#define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val) +#define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val) +#define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val) +#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val) +#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val) #define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(volatile, pcp, nval) #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(volatile, pcp, nval) #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(volatile, pcp, nval) @@ -509,19 +489,19 @@ do { \ */ #ifdef CONFIG_X86_64 #define raw_cpu_read_8(pcp) percpu_from_op(, "mov", pcp) -#define raw_cpu_write_8(pcp, val) percpu_to_op(, "mov", (pcp), val) +#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val) #define raw_cpu_add_8(pcp, val) percpu_add_op(, (pcp), val) -#define raw_cpu_and_8(pcp, val) percpu_to_op(, "and", (pcp), val) -#define raw_cpu_or_8(pcp, val) percpu_to_op(, "or", (pcp), val) +#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) +#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) #define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(, pcp, val) #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) #define this_cpu_read_8(pcp) percpu_from_op(volatile, "mov", pcp) -#define this_cpu_write_8(pcp, val) percpu_to_op(volatile, "mov", (pcp), val) +#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) #define this_cpu_add_8(pcp, val) percpu_add_op(volatile, (pcp), val) -#define this_cpu_and_8(pcp, val) percpu_to_op(volatile, "and", (pcp), val) -#define this_cpu_or_8(pcp, val) percpu_to_op(volatile, "or", (pcp), val) +#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) +#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(volatile, pcp, val) #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(volatile, pcp, nval) #define 
this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) -- cgit v1.2.3-58-ga151 From bb631e3002840706362a7d76e3ebb3604cce91a7 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:17 -0700 Subject: x86/percpu: Clean up percpu_from_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-4-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 50 +++++++++++++------------------------------ 1 file changed, 15 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index fb280fba94c5..a40d2e055f58 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -190,33 +190,13 @@ do { \ } \ } while (0) -#define percpu_from_op(qual, op, var) \ -({ \ - typeof(var) pfo_ret__; \ - switch (sizeof(var)) { \ - case 1: \ - asm qual (op "b "__percpu_arg(1)",%0" \ - : "=q" (pfo_ret__) \ - : "m" (var)); \ - break; \ - case 2: \ - asm qual (op "w "__percpu_arg(1)",%0" \ - : "=r" (pfo_ret__) \ - : "m" (var)); \ - break; \ - case 4: \ - asm qual (op "l "__percpu_arg(1)",%0" \ - : "=r" (pfo_ret__) \ - : "m" (var)); \ - break; \ - case 8: \ - asm qual (op "q "__percpu_arg(1)",%0" \ - : "=r" (pfo_ret__) \ - : "m" (var)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - pfo_ret__; \ +#define percpu_from_op(size, qual, op, _var) \ +({ \ + __pcpu_type_##size pfo_val__; \ + asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \ + : [val] __pcpu_reg_##size("=", pfo_val__) \ + : [var] "m" (_var)); \ + (typeof(_var))(unsigned long) pfo_val__; \ }) #define percpu_stable_op(op, var) \ @@ -401,9 +381,9 @@ do { \ */ #define this_cpu_read_stable(var) percpu_stable_op("mov", var) -#define raw_cpu_read_1(pcp) percpu_from_op(, "mov", pcp) -#define raw_cpu_read_2(pcp) percpu_from_op(, "mov", pcp) -#define raw_cpu_read_4(pcp) percpu_from_op(, "mov", pcp) +#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp) +#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp) +#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp) #define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val) #define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val) @@ -433,9 +413,9 @@ do { \ #define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val) #define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val) -#define this_cpu_read_1(pcp) percpu_from_op(volatile, "mov", pcp) -#define this_cpu_read_2(pcp) percpu_from_op(volatile, "mov", pcp) -#define this_cpu_read_4(pcp) percpu_from_op(volatile, "mov", pcp) +#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp) +#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp) +#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp) #define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val) #define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val) #define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val) @@ -488,7 +468,7 @@ do { \ * 32 
bit must fall back to generic operations. */ #ifdef CONFIG_X86_64 -#define raw_cpu_read_8(pcp) percpu_from_op(, "mov", pcp) +#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp) #define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val) #define raw_cpu_add_8(pcp, val) percpu_add_op(, (pcp), val) #define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) @@ -497,7 +477,7 @@ do { \ #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) -#define this_cpu_read_8(pcp) percpu_from_op(volatile, "mov", pcp) +#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp) #define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) #define this_cpu_add_8(pcp, val) percpu_add_op(volatile, (pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) -- cgit v1.2.3-58-ga151 From 33e5614a435ff8047d768e6501454ae1cc7f131f Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:18 -0700 Subject: x86/percpu: Clean up percpu_add_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-5-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 99 ++++++++++--------------------------------- 1 file changed, 22 insertions(+), 77 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index a40d2e055f58..2a24f3c795eb 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -130,64 +130,32 @@ do { \ : [val] __pcpu_reg_imm_##size(pto_val__)); \ } while (0) +#define percpu_unary_op(size, qual, op, _var) \ +({ \ + asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \ + : [var] "+m" (_var)); \ +}) + /* * Generate a percpu add to memory instruction and optimize code * if one is added or subtracted. */ -#define percpu_add_op(qual, var, val) \ +#define percpu_add_op(size, qual, var, val) \ do { \ - typedef typeof(var) pao_T__; \ const int pao_ID__ = (__builtin_constant_p(val) && \ ((val) == 1 || (val) == -1)) ? 
\ (int)(val) : 0; \ if (0) { \ - pao_T__ pao_tmp__; \ + typeof(var) pao_tmp__; \ pao_tmp__ = (val); \ (void)pao_tmp__; \ } \ - switch (sizeof(var)) { \ - case 1: \ - if (pao_ID__ == 1) \ - asm qual ("incb "__percpu_arg(0) : "+m" (var)); \ - else if (pao_ID__ == -1) \ - asm qual ("decb "__percpu_arg(0) : "+m" (var)); \ - else \ - asm qual ("addb %1, "__percpu_arg(0) \ - : "+m" (var) \ - : "qi" ((pao_T__)(val))); \ - break; \ - case 2: \ - if (pao_ID__ == 1) \ - asm qual ("incw "__percpu_arg(0) : "+m" (var)); \ - else if (pao_ID__ == -1) \ - asm qual ("decw "__percpu_arg(0) : "+m" (var)); \ - else \ - asm qual ("addw %1, "__percpu_arg(0) \ - : "+m" (var) \ - : "ri" ((pao_T__)(val))); \ - break; \ - case 4: \ - if (pao_ID__ == 1) \ - asm qual ("incl "__percpu_arg(0) : "+m" (var)); \ - else if (pao_ID__ == -1) \ - asm qual ("decl "__percpu_arg(0) : "+m" (var)); \ - else \ - asm qual ("addl %1, "__percpu_arg(0) \ - : "+m" (var) \ - : "ri" ((pao_T__)(val))); \ - break; \ - case 8: \ - if (pao_ID__ == 1) \ - asm qual ("incq "__percpu_arg(0) : "+m" (var)); \ - else if (pao_ID__ == -1) \ - asm qual ("decq "__percpu_arg(0) : "+m" (var)); \ - else \ - asm qual ("addq %1, "__percpu_arg(0) \ - : "+m" (var) \ - : "re" ((pao_T__)(val))); \ - break; \ - default: __bad_percpu_size(); \ - } \ + if (pao_ID__ == 1) \ + percpu_unary_op(size, qual, "inc", var); \ + else if (pao_ID__ == -1) \ + percpu_unary_op(size, qual, "dec", var); \ + else \ + percpu_to_op(size, qual, "add", var, val); \ } while (0) #define percpu_from_op(size, qual, op, _var) \ @@ -228,29 +196,6 @@ do { \ pfo_ret__; \ }) -#define percpu_unary_op(qual, op, var) \ -({ \ - switch (sizeof(var)) { \ - case 1: \ - asm qual (op "b "__percpu_arg(0) \ - : "+m" (var)); \ - break; \ - case 2: \ - asm qual (op "w "__percpu_arg(0) \ - : "+m" (var)); \ - break; \ - case 4: \ - asm qual (op "l "__percpu_arg(0) \ - : "+m" (var)); \ - break; \ - case 8: \ - asm qual (op "q "__percpu_arg(0) \ - : "+m" (var)); \ - break; \ - default: __bad_percpu_size(); \ - } \ -}) - /* * Add return operation */ @@ -388,9 +333,9 @@ do { \ #define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val) #define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val) #define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val) -#define raw_cpu_add_1(pcp, val) percpu_add_op(, (pcp), val) -#define raw_cpu_add_2(pcp, val) percpu_add_op(, (pcp), val) -#define raw_cpu_add_4(pcp, val) percpu_add_op(, (pcp), val) +#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) +#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) +#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val) #define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val) #define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val) #define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val) @@ -419,9 +364,9 @@ do { \ #define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val) #define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val) #define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val) -#define this_cpu_add_1(pcp, val) percpu_add_op(volatile, (pcp), val) -#define this_cpu_add_2(pcp, val) percpu_add_op(volatile, (pcp), val) -#define this_cpu_add_4(pcp, val) percpu_add_op(volatile, (pcp), val) +#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val) +#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val) +#define this_cpu_add_4(pcp, val) 
percpu_add_op(4, volatile, (pcp), val) #define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val) #define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val) #define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val) @@ -470,7 +415,7 @@ do { \ #ifdef CONFIG_X86_64 #define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp) #define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val) -#define raw_cpu_add_8(pcp, val) percpu_add_op(, (pcp), val) +#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) #define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) #define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) #define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(, pcp, val) @@ -479,7 +424,7 @@ do { \ #define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp) #define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) -#define this_cpu_add_8(pcp, val) percpu_add_op(volatile, (pcp), val) +#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(volatile, pcp, val) -- cgit v1.2.3-58-ga151 From e4d16defbbde028aeab2026995f0ced4240df6d6 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:19 -0700 Subject: x86/percpu: Remove "e" constraint from XADD The "e" constraint represents a constant, but the XADD instruction doesn't accept immediate operands. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-6-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 2a24f3c795eb..9bb5440d98d3 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -220,7 +220,7 @@ do { \ break; \ case 8: \ asm qual ("xaddq %0, "__percpu_arg(1) \ - : "+re" (paro_ret__), "+m" (var) \ + : "+r" (paro_ret__), "+m" (var) \ : : "memory"); \ break; \ default: __bad_percpu_size(); \ -- cgit v1.2.3-58-ga151 From bbff583b84a130d4d1234d68906c41690575be36 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:20 -0700 Subject: x86/percpu: Clean up percpu_add_return_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. 
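To make the xadd semantics concrete, here is an illustrative user-space sketch for x86-64 (not part of the patch): xadd leaves the previous memory value in the source register, so the caller adds the increment once more to obtain the post-add result, exactly as the reworked percpu_add_return_op() does with paro_tmp__ + _val. It also shows why the "e" constraint had to go in the preceding patch: xadd has no immediate form, so the operand must live in a register.

	#include <stdio.h>

	static unsigned long add_return(unsigned long *var, unsigned long val)
	{
		unsigned long tmp = val;

		/* xadd: *var += tmp, and the old *var is written back into tmp. */
		asm volatile("xaddq %[tmp], %[var]"
			     : [tmp] "+r" (tmp), [var] "+m" (*var)
			     : : "memory");
		return tmp + val;	/* old value + increment = new value */
	}

	int main(void)
	{
		unsigned long v = 40;
		unsigned long ret = add_return(&v, 2);

		printf("%lu %lu\n", ret, v);	/* prints: 42 42 */
		return 0;
	}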
Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-7-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 51 ++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 9bb5440d98d3..0776a11e7e11 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -199,34 +199,15 @@ do { \ /* * Add return operation */ -#define percpu_add_return_op(qual, var, val) \ +#define percpu_add_return_op(size, qual, _var, _val) \ ({ \ - typeof(var) paro_ret__ = val; \ - switch (sizeof(var)) { \ - case 1: \ - asm qual ("xaddb %0, "__percpu_arg(1) \ - : "+q" (paro_ret__), "+m" (var) \ - : : "memory"); \ - break; \ - case 2: \ - asm qual ("xaddw %0, "__percpu_arg(1) \ - : "+r" (paro_ret__), "+m" (var) \ - : : "memory"); \ - break; \ - case 4: \ - asm qual ("xaddl %0, "__percpu_arg(1) \ - : "+r" (paro_ret__), "+m" (var) \ - : : "memory"); \ - break; \ - case 8: \ - asm qual ("xaddq %0, "__percpu_arg(1) \ - : "+r" (paro_ret__), "+m" (var) \ - : : "memory"); \ - break; \ - default: __bad_percpu_size(); \ - } \ - paro_ret__ += val; \ - paro_ret__; \ + __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \ + asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \ + __percpu_arg([var])) \ + : [tmp] __pcpu_reg_##size("+", paro_tmp__), \ + [var] "+m" (_var) \ + : : "memory"); \ + (typeof(_var))(unsigned long) (paro_tmp__ + _val); \ }) /* @@ -377,16 +358,16 @@ do { \ #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(volatile, pcp, nval) #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(volatile, pcp, nval) -#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(, pcp, val) -#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(, pcp, val) -#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(, pcp, val) +#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) +#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) +#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val) #define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) #define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) #define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) -#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(volatile, pcp, val) -#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(volatile, pcp, val) -#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(volatile, pcp, val) +#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val) +#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val) +#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val) #define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) @@ -418,7 +399,7 @@ do { \ #define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) #define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) #define 
raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) -#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(, pcp, val) +#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) @@ -427,7 +408,7 @@ do { \ #define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) -#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(volatile, pcp, val) +#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(volatile, pcp, nval) #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) -- cgit v1.2.3-58-ga151 From 73ca542fbabb68deaa90130a8153cab1fa8288fe Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:21 -0700 Subject: x86/percpu: Clean up percpu_xchg_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-8-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 61 +++++++++++++------------------------------ 1 file changed, 18 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 0776a11e7e11..ac6d7e76c0d4 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -215,46 +215,21 @@ do { \ * expensive due to the implied lock prefix. The processor cannot prefetch * cachelines if xchg is used. 
*/ -#define percpu_xchg_op(qual, var, nval) \ +#define percpu_xchg_op(size, qual, _var, _nval) \ ({ \ - typeof(var) pxo_ret__; \ - typeof(var) pxo_new__ = (nval); \ - switch (sizeof(var)) { \ - case 1: \ - asm qual ("\n\tmov "__percpu_arg(1)",%%al" \ - "\n1:\tcmpxchgb %2, "__percpu_arg(1) \ - "\n\tjnz 1b" \ - : "=&a" (pxo_ret__), "+m" (var) \ - : "q" (pxo_new__) \ - : "memory"); \ - break; \ - case 2: \ - asm qual ("\n\tmov "__percpu_arg(1)",%%ax" \ - "\n1:\tcmpxchgw %2, "__percpu_arg(1) \ - "\n\tjnz 1b" \ - : "=&a" (pxo_ret__), "+m" (var) \ - : "r" (pxo_new__) \ - : "memory"); \ - break; \ - case 4: \ - asm qual ("\n\tmov "__percpu_arg(1)",%%eax" \ - "\n1:\tcmpxchgl %2, "__percpu_arg(1) \ - "\n\tjnz 1b" \ - : "=&a" (pxo_ret__), "+m" (var) \ - : "r" (pxo_new__) \ - : "memory"); \ - break; \ - case 8: \ - asm qual ("\n\tmov "__percpu_arg(1)",%%rax" \ - "\n1:\tcmpxchgq %2, "__percpu_arg(1) \ - "\n\tjnz 1b" \ - : "=&a" (pxo_ret__), "+m" (var) \ - : "r" (pxo_new__) \ - : "memory"); \ - break; \ - default: __bad_percpu_size(); \ - } \ - pxo_ret__; \ + __pcpu_type_##size pxo_old__; \ + __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \ + asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \ + "%[oval]") \ + "\n1:\t" \ + __pcpu_op2_##size("cmpxchg", "%[nval]", \ + __percpu_arg([var])) \ + "\n\tjnz 1b" \ + : [oval] "=&a" (pxo_old__), \ + [var] "+m" (_var) \ + : [nval] __pcpu_reg_##size(, pxo_new__) \ + : "memory"); \ + (typeof(_var))(unsigned long) pxo_old__; \ }) /* @@ -354,9 +329,9 @@ do { \ #define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val) #define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val) #define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val) -#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(volatile, pcp, nval) -#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(volatile, pcp, nval) -#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(volatile, pcp, nval) +#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval) +#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval) +#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval) #define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) #define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) @@ -409,7 +384,7 @@ do { \ #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) -#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(volatile, pcp, nval) +#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval) #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) /* -- cgit v1.2.3-58-ga151 From ebcd580bed4a357ea894e6878d9099b3919f727f Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:22 -0700 Subject: x86/percpu: Clean up percpu_cmpxchg_op() The core percpu macros already have a switch on the data size, so the switch in the x86 code is redundant and produces more dead code. Also use appropriate types for the width of the instructions. This avoids errors when compiling with Clang. 
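A user-space sketch of the underlying cmpxchg pattern may help here (illustrative only, x86-64, not taken from the patch): the expected value is tied to %rax through the "a" constraint, and on a mismatch the CPU overwrites %rax with the current memory value, which is precisely what the macro hands back so callers can compare or retry. No lock prefix is used, matching the CPU-local case described in the comment above.

	#include <stdio.h>

	static unsigned long cmpxchg_demo(unsigned long *var,
					  unsigned long old, unsigned long newval)
	{
		/* If *var == old: *var = newval. Otherwise: old = *var. */
		asm volatile("cmpxchgq %[newval], %[var]"
			     : [var] "+m" (*var), "+a" (old)
			     : [newval] "r" (newval)
			     : "memory");
		return old;	/* equals the expected value iff the store happened */
	}

	int main(void)
	{
		unsigned long v = 1;
		unsigned long r1 = cmpxchg_demo(&v, 1, 2);	/* succeeds */
		unsigned long r2 = cmpxchg_demo(&v, 1, 3);	/* fails, v stays 2 */

		printf("%lu %lu %lu\n", r1, r2, v);	/* prints: 1 2 2 */
		return 0;
	}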
Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-9-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 58 ++++++++++++++----------------------------- 1 file changed, 18 insertions(+), 40 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index ac6d7e76c0d4..7efc0b5c4ff0 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -236,39 +236,17 @@ do { \ * cmpxchg has no such implied lock semantics as a result it is much * more efficient for cpu local operations. */ -#define percpu_cmpxchg_op(qual, var, oval, nval) \ +#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \ ({ \ - typeof(var) pco_ret__; \ - typeof(var) pco_old__ = (oval); \ - typeof(var) pco_new__ = (nval); \ - switch (sizeof(var)) { \ - case 1: \ - asm qual ("cmpxchgb %2, "__percpu_arg(1) \ - : "=a" (pco_ret__), "+m" (var) \ - : "q" (pco_new__), "0" (pco_old__) \ - : "memory"); \ - break; \ - case 2: \ - asm qual ("cmpxchgw %2, "__percpu_arg(1) \ - : "=a" (pco_ret__), "+m" (var) \ - : "r" (pco_new__), "0" (pco_old__) \ - : "memory"); \ - break; \ - case 4: \ - asm qual ("cmpxchgl %2, "__percpu_arg(1) \ - : "=a" (pco_ret__), "+m" (var) \ - : "r" (pco_new__), "0" (pco_old__) \ - : "memory"); \ - break; \ - case 8: \ - asm qual ("cmpxchgq %2, "__percpu_arg(1) \ - : "=a" (pco_ret__), "+m" (var) \ - : "r" (pco_new__), "0" (pco_old__) \ - : "memory"); \ - break; \ - default: __bad_percpu_size(); \ - } \ - pco_ret__; \ + __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \ + __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \ + asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \ + __percpu_arg([var])) \ + : [oval] "+a" (pco_old__), \ + [var] "+m" (_var) \ + : [nval] __pcpu_reg_##size(, pco_new__) \ + : "memory"); \ + (typeof(_var))(unsigned long) pco_old__; \ }) /* @@ -336,16 +314,16 @@ do { \ #define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) #define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) #define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val) -#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) -#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) -#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) +#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval) +#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval) +#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval) #define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val) #define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val) #define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val) -#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) -#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) -#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, 
volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval) #ifdef CONFIG_X86_CMPXCHG64 #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \ @@ -376,7 +354,7 @@ do { \ #define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) #define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) -#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval) +#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval) #define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp) #define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) @@ -385,7 +363,7 @@ do { \ #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval) -#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval) +#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) /* * Pretty complex macro to generate cmpxchg16 instruction. The instruction -- cgit v1.2.3-58-ga151 From c94055fe93c8d00bfa23fa2cb9af080f7fc53aa0 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:23 -0700 Subject: x86/percpu: Clean up percpu_stable_op() Use __pcpu_size_call_return() to simplify this_cpu_read_stable(). Also remove __bad_percpu_size() which is now unused. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Linus Torvalds Acked-by: Peter Zijlstra (Intel) Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-10-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 41 ++++++++++++----------------------------- 1 file changed, 12 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 7efc0b5c4ff0..cf2b9c2a241e 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -85,7 +85,6 @@ /* For arch-specific code, we can use direct single-insn ops (they * don't give an lvalue though). */ -extern void __bad_percpu_size(void); #define __pcpu_type_1 u8 #define __pcpu_type_2 u16 @@ -167,33 +166,13 @@ do { \ (typeof(_var))(unsigned long) pfo_val__; \ }) -#define percpu_stable_op(op, var) \ -({ \ - typeof(var) pfo_ret__; \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b "__percpu_arg(P1)",%0" \ - : "=q" (pfo_ret__) \ - : "p" (&(var))); \ - break; \ - case 2: \ - asm(op "w "__percpu_arg(P1)",%0" \ - : "=r" (pfo_ret__) \ - : "p" (&(var))); \ - break; \ - case 4: \ - asm(op "l "__percpu_arg(P1)",%0" \ - : "=r" (pfo_ret__) \ - : "p" (&(var))); \ - break; \ - case 8: \ - asm(op "q "__percpu_arg(P1)",%0" \ - : "=r" (pfo_ret__) \ - : "p" (&(var))); \ - break; \ - default: __bad_percpu_size(); \ - } \ - pfo_ret__; \ +#define percpu_stable_op(size, op, _var) \ +({ \ + __pcpu_type_##size pfo_val__; \ + asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]") \ + : [val] __pcpu_reg_##size("=", pfo_val__) \ + : [var] "p" (&(_var))); \ + (typeof(_var))(unsigned long) pfo_val__; \ }) /* @@ -258,7 +237,11 @@ do { \ * per-thread variables implemented as per-cpu variables and thus * stable for the duration of the respective task. 
*/ -#define this_cpu_read_stable(var) percpu_stable_op("mov", var) +#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp) +#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp) +#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp) +#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp) +#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) #define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp) #define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp) -- cgit v1.2.3-58-ga151 From 4719ffecbb0659faf1fd39f4b8eb2674f0042890 Mon Sep 17 00:00:00 2001 From: Brian Gerst Date: Mon, 20 Jul 2020 13:49:24 -0700 Subject: x86/percpu: Remove unused PER_CPU() macro Also remove now unused __percpu_mov_op. Signed-off-by: Brian Gerst Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Reviewed-by: Nick Desaulniers Acked-by: Peter Zijlstra (Intel) Acked-by: Linus Torvalds Acked-by: Dennis Zhou Link: https://lkml.kernel.org/r/20200720204925.3654302-11-ndesaulniers@google.com --- arch/x86/include/asm/percpu.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index cf2b9c2a241e..a3c33b79fb86 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -4,33 +4,15 @@ #ifdef CONFIG_X86_64 #define __percpu_seg gs -#define __percpu_mov_op movq #else #define __percpu_seg fs -#define __percpu_mov_op movl #endif #ifdef __ASSEMBLY__ -/* - * PER_CPU finds an address of a per-cpu variable. - * - * Args: - * var - variable name - * reg - 32bit register - * - * The resulting address is stored in the "reg" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ #ifdef CONFIG_SMP -#define PER_CPU(var, reg) \ - __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \ - lea var(reg), reg #define PER_CPU_VAR(var) %__percpu_seg:var #else /* ! SMP */ -#define PER_CPU(var, reg) __percpu_mov_op $var, reg #define PER_CPU_VAR(var) var #endif /* SMP */ -- cgit v1.2.3-58-ga151 From 158807de5822d1079e162a3762956fd743dd483e Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 20 Jul 2020 13:49:25 -0700 Subject: x86/uaccess: Make __get_user_size() Clang compliant on 32-bit Clang fails to compile __get_user_size() on 32-bit for the following code: long long val; __get_user(val, usrptr); with: error: invalid output size for constraint '=q' GCC compiles the same code without complaints. The reason is that GCC and Clang are architecturally different, which leads to subtle issues for code that's invalid but clearly dead, i.e. with code that emulates polymorphism with the preprocessor and sizeof. GCC will perform semantic analysis after early inlining and dead code elimination, so it will not warn on invalid code that's dead. Clang strictly performs optimizations after semantic analysis, so it will warn for dead code. Neither Clang nor GCC like this very much with -m32: long long ret; asm ("movb $5, %0" : "=q" (ret)); However, GCC can tolerate this variant: long long ret; switch (sizeof(ret)) { case 1: asm ("movb $5, %0" : "=q" (ret)); break; case 8:; } Clang, on the other hand, won't accept that because it validates the inline asm for the '1' case before the optimisation phase where it realises that it wouldn't have to emit it anyway. 
If LLVM (Clang's "back end") fails such as during instruction selection or register allocation, it cannot provide accurate diagnostics (warnings / errors) that contain line information, as the AST has been discarded from memory at that point. While there have been early discussions about having C/C++ specific language optimizations in Clang via the use of MLIR, which would enable such earlier optimizations, such work is not scoped and likely a multi-year endeavor. It was discussed to change the asm output constraint for the one byte case from "=q" to "=r". While it works for 64-bit, it fails on 32-bit. With '=r' the compiler could fail to chose a register accessible as high/low which is required for the byte operation. If that happens the assembly will fail. Use a local temporary variable of type 'unsigned char' as output for the byte copy inline asm and then assign it to the real output variable. This prevents Clang from failing the semantic analysis in the above case. The resulting code for the actual one byte copy is not affected as the temporary variable is optimized out. [ tglx: Amended changelog ] Reported-by: Arnd Bergmann Reported-by: David Woodhouse Reported-by: Dmitry Golovin Reported-by: Linus Torvalds Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Tested-by: Sedat Dilek Acked-by: Linus Torvalds Acked-by: Dennis Zhou Link: https://bugs.llvm.org/show_bug.cgi?id=33587 Link: https://github.com/ClangBuiltLinux/linux/issues/3 Link: https://github.com/ClangBuiltLinux/linux/issues/194 Link: https://github.com/ClangBuiltLinux/linux/issues/781 Link: https://lore.kernel.org/lkml/20180209161833.4605-1-dwmw2@infradead.org/ Link: https://lore.kernel.org/lkml/CAK8P3a1EBaWdbAEzirFDSgHVJMtWjuNt2HGG8z+vpXeNHwETFQ@mail.gmail.com/ Link: https://lkml.kernel.org/r/20200720204925.3654302-12-ndesaulniers@google.com --- arch/x86/include/asm/uaccess.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 18dfa07d3ef0..2f3e8f2a958f 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -314,11 +314,14 @@ do { \ #define __get_user_size(x, ptr, size, retval) \ do { \ + unsigned char x_u8__; \ + \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: \ - __get_user_asm(x, ptr, retval, "b", "=q"); \ + __get_user_asm(x_u8__, ptr, retval, "b", "=q"); \ + (x) = x_u8__; \ break; \ case 2: \ __get_user_asm(x, ptr, retval, "w", "=r"); \ -- cgit v1.2.3-58-ga151
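For completeness, a standalone reproducer sketched from the changelog above (illustrative only, built by hand with clang -m32 versus gcc -m32; not part of the patch): the dead "=q" arm alone is enough for Clang to reject the translation unit, while routing the byte case through an unsigned char temporary keeps every arm type-correct and costs nothing once the temporary is optimized away.

	long long dst;

	void broken(void)
	{
		switch (sizeof(dst)) {
		case 1:
			/* clang -m32: error: invalid output size for constraint '=q' */
			asm("movb $5, %0" : "=q" (dst));
			break;
		case 8:
			break;
		}
	}

	void fixed(void)
	{
		unsigned char tmp;

		switch (sizeof(dst)) {
		case 1:
			asm("movb $5, %0" : "=q" (tmp));	/* always type-correct */
			dst = tmp;
			break;
		case 8:
			break;
		}
	}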