-rw-r--r--  include/linux/bitmap.h | 8 +++++++-
-rw-r--r--  lib/percpu-refcount.c  | 7 ++++---
-rw-r--r--  mm/percpu-stats.c      | 2 +-
-rw-r--r--  mm/percpu.c            | 2 +-
4 files changed, 13 insertions, 6 deletions
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e52ceb1a73d3..99058eb81042 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -50,7 +50,13 @@
  *  bitmap_set(dst, pos, nbits)                          Set specified bit area
  *  bitmap_clear(dst, pos, nbits)                        Clear specified bit area
  *  bitmap_find_next_zero_area(buf, len, pos, n, mask)   Find bit free area
- *  bitmap_find_next_zero_area_off(buf, len, pos, n, mask)  as above
+ *  bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off)  as above
+ *  bitmap_next_clear_region(map, &start, &end, nbits)   Find next clear region
+ *  bitmap_next_set_region(map, &start, &end, nbits)     Find next set region
+ *  bitmap_for_each_clear_region(map, rs, re, start, end)
+ *                                                       Iterate over all clear regions
+ *  bitmap_for_each_set_region(map, rs, re, start, end)
+ *                                                       Iterate over all set regions
  *  bitmap_shift_right(dst, src, n, nbits)               *dst = *src >> n
  *  bitmap_shift_left(dst, src, n, nbits)                *dst = *src << n
  *  bitmap_cut(dst, src, first, n, nbits)                Cut n bits from first, copy rest
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 4f6c6ebbbbde..8d092609928e 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -50,9 +50,10 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
  * @flags: PERCPU_REF_INIT_* flags
  * @gfp: allocation mask to use
  *
- * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1). See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
  *
  * Note that @release must not sleep - it may potentially be called from RCU
  * callback context by percpu_ref_kill().
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index a5a8b22816ff..32558063c3f9 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -3,7 +3,7 @@
  * mm/percpu-debug.c
  *
  * Copyright (C) 2017 Facebook Inc.
- * Copyright (C) 2017 Dennis Zhou <dennisz@fb.com>
+ * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
  *
  * Prints statistics about the percpu allocator and backing chunks.
  */
diff --git a/mm/percpu.c b/mm/percpu.c
index e9844086b236..d7e3bc649f4e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
  *
  * Copyright (C) 2017 Facebook Inc.
- * Copyright (C) 2017 Dennis Zhou <dennisszhou@gmail.com>
+ * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
  *
  * The percpu allocator handles both static and dynamic areas. Percpu
  * areas are allocated in chunks which are divided into units. There is
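
For context on the bitmap helpers documented above, a minimal sketch of how the region iterator is meant to be used follows. Only the bitmap_for_each_set_region(map, rs, re, start, end) form comes from the patch itself; the function name count_populated_pages and the chunk_map/nr_bits parameters are illustrative assumptions, not part of this change.

#include <linux/bitmap.h>
#include <linux/types.h>

/*
 * Hypothetical caller: count how many bits are set in a chunk-style
 * population bitmap by walking its contiguous set regions.
 */
static unsigned int count_populated_pages(unsigned long *chunk_map,
					  unsigned int nr_bits)
{
	unsigned int rs, re;		/* region start / end (end exclusive) */
	unsigned int pages = 0;

	/* Visit every contiguous run of set bits in [0, nr_bits). */
	bitmap_for_each_set_region(chunk_map, rs, re, 0, nr_bits)
		pages += re - rs;

	return pages;
}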
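
The reworded percpu_ref_init() comment distinguishes three start states. A hedged sketch of what that looks like at an init site is below; my_ref, my_release, and my_init are made-up names for illustration and are not part of this patch.

#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref my_ref;

static void my_release(struct percpu_ref *ref)
{
	/* Called once the final reference is dropped. */
}

static int my_init(void)
{
	/*
	 * flags == 0:              percpu mode, initial refcount 1.
	 * PERCPU_REF_INIT_ATOMIC:  atomic mode, initial refcount 1.
	 * PERCPU_REF_INIT_DEAD:    atomic mode, initial refcount 0; the ref
	 *                          must be brought back with
	 *                          percpu_ref_reinit() before use.
	 */
	return percpu_ref_init(&my_ref, my_release, PERCPU_REF_INIT_DEAD,
			       GFP_KERNEL);
}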