author    | Matthew Wilcox <willy@infradead.org> | 2019-02-08 14:02:45 -0500
committer | Matthew Wilcox <willy@infradead.org> | 2019-02-09 00:00:49 -0500
commit    | f818b82b80164014d7ee3df89bb110808778c796 (patch)
tree      | 36ee086ab1bd913f9a0519b1a4b8c08b7176b875 /include
parent    | 2fa044e51a1f35d7b04cbde07ec513b0ba195e38 (diff)
XArray: Mark xa_insert and xa_reserve as must_check
If the user doesn't care about the return value from xa_insert(), they
should be using xa_store() instead. The point of xa_reserve() is to get
any error back early, before the caller takes another lock, so it should
also be __must_check.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
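The distinction the commit message draws can be illustrated with a minimal
sketch. This is not part of the patch; cache_add(), cache_set() and the
example array are hypothetical, only the xa_insert()/xa_store() calls come
from the XArray API touched here.

#include <linux/xarray.h>

static DEFINE_XARRAY(cache);	/* hypothetical example array */

/* The caller needs to know whether @id was already occupied, so the
 * (now __must_check) return value of xa_insert() is propagated.
 */
static int cache_add(unsigned long id, void *obj)
{
	return xa_insert(&cache, id, obj, GFP_KERNEL);	/* 0, -EBUSY or -ENOMEM */
}

/* The caller doesn't care whether an entry already existed: xa_store()
 * simply replaces it, and its return value (the old entry) may be ignored.
 */
static void cache_set(unsigned long id, void *obj)
{
	xa_store(&cache, id, obj, GFP_KERNEL);
}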
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/xarray.h | 23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 5ed6b462e754..687c150071a5 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -497,12 +497,13 @@
 void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index,
			void *old, void *entry, gfp_t);
-int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
+int __must_check __xa_insert(struct xarray *, unsigned long index,
+		void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
 int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
-int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
+int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -704,8 +705,8 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
	int err;
@@ -733,8 +734,8 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_bh(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
	int err;
@@ -762,8 +763,8 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_irq(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
	int err;
@@ -978,7 +979,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
  * May sleep if the @gfp flags permit.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
	int ret;
@@ -1002,7 +1003,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling softirqs.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
	int ret;
@@ -1026,7 +1027,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling interrupts.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
	int ret;
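The xa_reserve() pattern the commit message alludes to looks roughly like the
sketch below. It is an illustration only, not code from the patch:
publish_object() and my_lock are hypothetical names, and only xa_reserve(),
xa_store() and the spinlock calls are real kernel API.

#include <linux/spinlock.h>
#include <linux/xarray.h>

/* Reserve the slot while sleeping allocations are still allowed, so the
 * store performed under the spinlock cannot fail for lack of memory.
 */
static int publish_object(struct xarray *xa, spinlock_t *my_lock,
			  unsigned long id, void *obj)
{
	int err;

	/* __must_check: an -ENOMEM here must be handled before locking. */
	err = xa_reserve(xa, id, GFP_KERNEL);
	if (err)
		return err;

	spin_lock(my_lock);
	/* The slot is already reserved, so this store need not allocate. */
	xa_store(xa, id, obj, GFP_ATOMIC);
	spin_unlock(my_lock);

	return 0;
}

If the object ends up not being published, the reservation can be dropped
again with xa_release().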