author    Linus Torvalds <torvalds@linux-foundation.org>  2024-05-21 15:29:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-05-21 15:29:01 -0700
commit    4865a27c66fda6a32511ec5492f4bbec437f512d (patch)
tree      dae4d82641bb45bc97735799dd7f0aac88b34778 /include
parent    b6394d6f715919c053c1450ef0d7c5e517b53764 (diff)
parent    5671dca241b9a2f4ecf88d8e992041cfb580e0a5 (diff)
Merge tag 'bitmap-for-6.10v2' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - topology_span_sane() optimization from Kyle Meyer

 - fns() rework from Kuan-Wei Chiu (used in cpumask_local_spread() and
   other places)

 - headers cleanup from Andy

 - add a MAINTAINERS record for bitops API

* tag 'bitmap-for-6.10v2' of https://github.com/norov/linux:
  usercopy: Don't use "proxy" headers
  bitops: Move aligned_byte_mask() to wordpart.h
  MAINTAINERS: add BITOPS API record
  bitmap: relax find_nth_bit() limitation on return value
  lib: make test_bitops compilable into the kernel image
  bitops: Optimize fns() for improved performance
  lib/test_bitops: Add benchmark test for fns()
  Compiler Attributes: Add __always_used macro
  sched/topology: Optimize topology_span_sane()
  cpumask: Add for_each_cpu_from()
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bitops.h               19
-rw-r--r--  include/linux/compiler_attributes.h  13
-rw-r--r--  include/linux/cpumask.h              10
-rw-r--r--  include/linux/find.h                  2
-rw-r--r--  include/linux/wordpart.h              7
5 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 3112ae7d6524..46d4bdc634c0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -8,13 +8,6 @@
#include <uapi/linux/kernel.h>
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
-#else
-# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
@@ -257,16 +250,10 @@ static inline unsigned int __ffs64(u64 word)
*/
static inline unsigned int fns(unsigned long word, unsigned int n)
{
-	unsigned int bit;
-
-	while (word) {
-		bit = __ffs(word);
-		if (n-- == 0)
-			return bit;
-		__clear_bit(bit, &word);
-	}
+	while (word && n--)
+		word &= word - 1;

-	return BITS_PER_LONG;
+	return word ? __ffs(word) : BITS_PER_LONG;
}
/**
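
The rework above drops the explicit __ffs()/__clear_bit() loop: "word &= word - 1" clears the lowest set bit, so after discarding the n lowest set bits the lowest remaining bit (if any) is the n'th set bit. A minimal userspace sketch of the same logic, assuming __builtin_ctzl() as a stand-in for the kernel's __ffs() and a hypothetical fns_demo() name:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Mirror of the reworked fns(): return the n'th (0-based) set bit, or
 * BITS_PER_LONG when fewer than n+1 bits are set. */
static unsigned int fns_demo(unsigned long word, unsigned int n)
{
	/* Each "word &= word - 1" clears the lowest set bit, so this
	 * discards the n lowest set bits. */
	while (word && n--)
		word &= word - 1;

	return word ? (unsigned int)__builtin_ctzl(word) : BITS_PER_LONG;
}

int main(void)
{
	unsigned long w = 0xb4;	/* bits 2, 4, 5 and 7 are set */

	printf("%u %u %u\n", fns_demo(w, 0), fns_demo(w, 3), fns_demo(w, 4));
	/* prints: 2 7 64 (on a 64-bit host) */
	return 0;
}
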
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 8bdf6e0918c1..32284cd26d52 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -362,6 +362,19 @@
#define __used __attribute__((__used__))
/*
+ * The __used attribute guarantees that the attributed variable will
+ * always be emitted by the compiler. It doesn't prevent the compiler
+ * from throwing 'unused' warnings when it can't detect how the
+ * variable is actually used; whether the warning is emitted in that
+ * case is a compiler implementation detail.
+ *
+ * The combination of the 'used' and 'unused' attributes ensures that
+ * the variable is emitted and does not trigger 'unused' warnings. The
+ * attribute is applicable to functions and to static and global variables.
+ */
+#define __always_used __used __maybe_unused
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
*/
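
__always_used combines __used (force the compiler to emit the symbol) with __maybe_unused (suppress 'unused' diagnostics). A rough standalone sketch with the two attributes spelled out directly and a hypothetical variable name; in the kernel the macro simply expands to the existing __used and __maybe_unused helpers:

/* Spelled out for a standalone build; the kernel uses __used __maybe_unused. */
#define __always_used __attribute__((__used__)) __attribute__((__unused__))

/* Hypothetical example: a variable kept alive only for inspection from a
 * debugger or a benchmark loop, where the compiler cannot see any use. */
static unsigned long debug_scratch __always_used = 0x5a5a5a5aUL;

int main(void)
{
	return 0;
}
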
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 7600e877908f..4d3bef644add 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -386,6 +386,16 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
+ * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_from(cpu, mask) \
+ for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
+
+/**
* cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
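
for_each_cpu_from() continues a scan from the current value of @cpu rather than restarting at bit 0. A rough single-word userspace analogue, where a plain unsigned long stands in for the cpumask and the hypothetical next_set_bit_from() replaces the kernel's bit-search helpers:

#include <stdio.h>

#define NBITS (8 * (int)sizeof(unsigned long))

/* Userspace stand-in for scanning to the next set bit at or after "start". */
static unsigned int next_set_bit_from(unsigned long word, unsigned int start)
{
	while (start < NBITS && !(word & (1UL << start)))
		start++;
	return start;
}

/* Rough analogue of for_each_cpu_from(): iterate over set bits starting
 * at the current value of "cpu" and running to the end of the mask. */
#define for_each_cpu_from_demo(cpu, word)				\
	for ((cpu) = next_set_bit_from((word), (cpu)); (cpu) < NBITS;	\
	     (cpu) = next_set_bit_from((word), (cpu) + 1))

int main(void)
{
	unsigned long online = 0xb4;	/* "CPUs" 2, 4, 5 and 7 */
	unsigned int cpu = 3;		/* resume scanning at CPU 3 */

	for_each_cpu_from_demo(cpu, online)
		printf("cpu %u\n", cpu);	/* prints 4, 5, 7 */
	return 0;
}
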
diff --git a/include/linux/find.h b/include/linux/find.h
index 28ec5a03393a..5dfca4225fef 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -222,7 +222,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
* idx = find_first_bit(addr, size);
*
* Returns the bit number of the N'th set bit.
- * If no such, returns @size.
+ * If no such bit exists, returns >= @size.
*/
static inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
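
With the relaxed contract, a caller can no longer compare the result against @size to detect the "not found" case; only a ">= size" check is reliable. A tiny caller-side sketch with a mocked helper (the real find_nth_bit() operates on a kernel bitmap):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Mock of the "no n'th set bit" case: the helper is now allowed to return
 * any value >= size (here BITS_PER_LONG), not necessarily size itself. */
static unsigned long find_nth_bit_mock(unsigned long size)
{
	(void)size;
	return BITS_PER_LONG;
}

int main(void)
{
	unsigned long size = 12;
	unsigned long bit = find_nth_bit_mock(size);

	if (bit >= size)	/* correct test; "bit == size" would miss this */
		printf("no such bit (result %lu)\n", bit);
	return 0;
}
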
diff --git a/include/linux/wordpart.h b/include/linux/wordpart.h
index f6f8f83b15b0..4ca1ba66d2f0 100644
--- a/include/linux/wordpart.h
+++ b/include/linux/wordpart.h
@@ -39,4 +39,11 @@
*/
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
#endif // _LINUX_WORDPART_H
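
aligned_byte_mask(n) selects the bits occupied by the first n bytes of a word as it sits in memory: on little-endian those are the n low-order bytes, on big-endian the n high-order ones. A little-endian-only sketch of the arithmetic, using a hypothetical _le name to avoid the endianness #ifdef:

#include <stdio.h>

/* Little-endian form only: the first n bytes of a word loaded from memory
 * are its n low-order bytes, so the mask is the low 8*n bits. */
#define aligned_byte_mask_le(n) ((1UL << 8 * (n)) - 1)

int main(void)
{
	/* prints 0xff 0xffff 0xffffff */
	printf("%#lx %#lx %#lx\n",
	       aligned_byte_mask_le(1),
	       aligned_byte_mask_le(2),
	       aligned_byte_mask_le(3));
	return 0;
}
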