author    | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-10-04 14:45:51 +0100
committer | Vlastimil Babka <vbabka@suse.cz>              | 2022-01-06 12:25:40 +0100
commit    | d122019bf061cccc4583eb9ad40bf58c2fe517be (patch)
tree      | 5488e5e9b595fc47e34c1a3f766978ee9bbc50be /mm/slab.h
parent    | ae16d059f8c9409eba0c412639def0494765b761 (diff)
mm: Split slab into its own type
Make struct slab independent of struct page. It still uses the
underlying memory in struct page for storing slab-specific data, but
slab and slub can now be weaned off using struct page directly. Some of
the wrapper functions (slab_address() and slab_order()) still need to
cast to struct folio, but this is a significant disentanglement.
[ vbabka@suse.cz: Rebase on folios, use folio instead of page where
possible.
Do not duplicate flags field in struct slab, instead make the related
accessors go through slab_folio(). For testing pfmemalloc use the
folio_*_active flag accessors directly so the PageSlabPfmemalloc
wrappers can be removed later.
Make folio_slab() expect only folio_test_slab() == true folios and
virt_to_slab() return NULL when folio_test_slab() == false.
Move struct slab to mm/slab.h.
Don't use struct slab to represent pages that are not true slab pages
but just compound pages obtained directly from the page allocator
(as with large kmalloc() for SLUB and SLOB). ]
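[ To illustrate that contract, a minimal hypothetical sketch, not part of
this patch: virt_to_slab() returns NULL when folio_test_slab() is false,
so callers must check before dereferencing. inspect_object() is an
invented name.

static void inspect_object(const void *obj)
{
	/* virt_to_slab() is NULL for non-slab memory, e.g. a compound
	 * page obtained from a large kmalloc() under SLUB/SLOB. */
	struct slab *slab = virt_to_slab(obj);

	if (!slab)
		return;

	pr_info("%p: cache %s, node %d, order %d\n", obj,
		slab->slab_cache->name, slab_nid(slab), slab_order(slab));
}
]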
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Diffstat (limited to 'mm/slab.h')
-rw-r--r-- | mm/slab.h | 167
1 file changed, 167 insertions, 0 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 56ad7eea3ddf..0e67a8cb7f80 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -5,6 +5,173 @@
 /*
  * Internal slab definitions
  */
+/* Reuses the bits in struct page */
+struct slab {
+	unsigned long __page_flags;
+	union {
+		struct list_head slab_list;
+		struct {	/* Partial pages */
+			struct slab *next;
+#ifdef CONFIG_64BIT
+			int slabs;	/* Nr of slabs left */
+#else
+			short int slabs;
+#endif
+		};
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache; /* not slob */
+	/* Double-word boundary */
+	void *freelist;		/* first free object */
+	union {
+		void *s_mem;	/* slab: first object */
+		unsigned long counters;		/* SLUB */
+		struct {			/* SLUB */
+			unsigned inuse:16;
+			unsigned objects:15;
+			unsigned frozen:1;
+		};
+	};
+
+	union {
+		unsigned int active;		/* SLAB */
+		int units;			/* SLOB */
+	};
+	atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+	unsigned long memcg_data;
+#endif
+};
+
+#define SLAB_MATCH(pg, sl)						\
+	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
+SLAB_MATCH(flags, __page_flags);
+SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
+SLAB_MATCH(slab_list, slab_list);
+SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(slab_cache, slab_cache);
+SLAB_MATCH(s_mem, s_mem);
+SLAB_MATCH(active, active);
+SLAB_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+SLAB_MATCH(memcg_data, memcg_data);
+#endif
+#undef SLAB_MATCH
+static_assert(sizeof(struct slab) <= sizeof(struct page));
+
+/**
+ * folio_slab - Converts from folio to slab.
+ * @folio: The folio.
+ *
+ * Currently struct slab is a different representation of a folio where
+ * folio_test_slab() is true.
+ *
+ * Return: The slab which contains this folio.
+ */
+#define folio_slab(folio)	(_Generic((folio),			\
+	const struct folio *:	(const struct slab *)(folio),		\
+	struct folio *:		(struct slab *)(folio)))
+
+/**
+ * slab_folio - The folio allocated for a slab
+ * @slab: The slab.
+ *
+ * Slabs are allocated as folios that contain the individual objects and are
+ * using some fields in the first struct page of the folio - those fields are
+ * now accessed by struct slab. It is occasionally necessary to convert back
+ * to a folio in order to communicate with the rest of the mm. Please use
+ * this helper function instead of casting yourself, as the implementation
+ * may change in the future.
+ */
+#define slab_folio(s)		(_Generic((s),				\
+	const struct slab *:	(const struct folio *)s,		\
+	struct slab *:		(struct folio *)s))
+
+/**
+ * page_slab - Converts from first struct page to slab.
+ * @p: The first (either head of compound or single) page of slab.
+ *
+ * A temporary wrapper to convert struct page to struct slab in situations
+ * where we know the page is the compound head, or single order-0 page.
+ *
+ * Long-term ideally everything would work with struct slab directly or go
+ * through folio to struct slab.
+ *
+ * Return: The slab which contains this page
+ */
+#define page_slab(p)		(_Generic((p),				\
+	const struct page *:	(const struct slab *)(p),		\
+	struct page *:		(struct slab *)(p)))
+
+/**
+ * slab_page - The first struct page allocated for a slab
+ * @slab: The slab.
+ *
+ * A convenience wrapper for converting slab to the first struct page of the
+ * underlying folio, to communicate with code not yet converted to folio or
+ * struct slab.
+ */
+#define slab_page(s) folio_page(slab_folio(s), 0)
+
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return folio_test_active((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	folio_set_active(slab_folio(slab));
+}
+
+static inline void slab_clear_pfmemalloc(struct slab *slab)
+{
+	folio_clear_active(slab_folio(slab));
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__folio_clear_active(slab_folio(slab));
+}
+
+static inline void *slab_address(const struct slab *slab)
+{
+	return folio_address(slab_folio(slab));
+}
+
+static inline int slab_nid(const struct slab *slab)
+{
+	return folio_nid(slab_folio(slab));
+}
+
+static inline pg_data_t *slab_pgdat(const struct slab *slab)
+{
+	return folio_pgdat(slab_folio(slab));
+}
+
+static inline struct slab *virt_to_slab(const void *addr)
+{
+	struct folio *folio = virt_to_folio(addr);
+
+	if (!folio_test_slab(folio))
+		return NULL;
+
+	return folio_slab(folio);
+}
+
+static inline int slab_order(const struct slab *slab)
+{
+	return folio_order((struct folio *)slab_folio(slab));
+}
+
+static inline size_t slab_size(const struct slab *slab)
+{
+	return PAGE_SIZE << slab_order(slab);
+}
+
 #ifdef CONFIG_SLOB
 /*
  * Common fields provided in kmem_cache by all slab allocators
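For context, a hedged sketch of how allocator code could walk the object
slots of a slab purely through the new helpers, loosely modeled on SLUB's
internal iteration. walk_slab_objects() is an invented name, not part of
this patch; it assumes a struct kmem_cache where s->size is the per-object
stride, and it ignores debug red zones and freelist state:

static inline void walk_slab_objects(struct kmem_cache *s, struct slab *slab,
				     void (*fn)(void *object))
{
	void *start = slab_address(slab);	/* first byte of slab memory */
	void *end = start + slab_size(slab);	/* PAGE_SIZE << slab_order() */
	void *p;

	/* Visit each object slot in turn, whether allocated or free. */
	for (p = start; p + s->size <= end; p += s->size)
		fn(p);
}

Note how nothing above touches struct page directly: the address, order and
node all come from the slab wrappers, which is the disentanglement the
commit message describes.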