Diffstat (limited to 'mm/ksm.c')
-rw-r--r-- | mm/ksm.c | 136
1 file changed, 65 insertions, 71 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -990,14 +990,6 @@ stale:
 	return NULL;
 }
 
-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
-				 enum get_ksm_page_flags flags)
-{
-	struct folio *folio = ksm_get_folio(stable_node, flags);
-
-	return &folio->page;
-}
-
 /*
  * Removing rmap_item from stable or unstable tree.
  * This function will clean the information from the stable/unstable tree.
@@ -1632,10 +1624,10 @@ bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
 	return __is_page_sharing_candidate(stable_node, 0);
 }
 
-static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
-				    struct ksm_stable_node **_stable_node,
-				    struct rb_root *root,
-				    bool prune_stale_stable_nodes)
+static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
+				     struct ksm_stable_node **_stable_node,
+				     struct rb_root *root,
+				     bool prune_stale_stable_nodes)
 {
 	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
 	struct hlist_node *hlist_safe;
@@ -1748,7 +1740,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 	}
 
 	*_stable_node_dup = found;
-	return &tree_folio->page;
+	return tree_folio;
 }
 
 static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
@@ -1765,7 +1757,7 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stabl
 }
 
 /*
- * Like for get_ksm_page, this function can free the *_stable_node and
+ * Like for ksm_get_folio, this function can free the *_stable_node and
  * *_stable_node_dup if the returned tree_page is NULL.
  *
  * It can also free and overwrite *_stable_node with the found
@@ -1778,16 +1770,16 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stabl
  * function and will be overwritten in all cases, the caller doesn't
  * need to initialize it.
  */
-static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
-					struct ksm_stable_node **_stable_node,
-					struct rb_root *root,
-					bool prune_stale_stable_nodes)
+static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+					 struct ksm_stable_node **_stable_node,
+					 struct rb_root *root,
+					 bool prune_stale_stable_nodes)
 {
 	struct ksm_stable_node *stable_node = *_stable_node;
 	if (!is_stable_node_chain(stable_node)) {
 		if (is_page_sharing_candidate(stable_node)) {
 			*_stable_node_dup = stable_node;
-			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+			return ksm_get_folio(stable_node, GET_KSM_PAGE_NOLOCK);
 		}
 		/*
 		 * _stable_node_dup set to NULL means the stable_node
@@ -1800,24 +1792,24 @@
 				       prune_stale_stable_nodes);
 }
 
-static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
-						struct ksm_stable_node **s_n,
-						struct rb_root *root)
+static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
+						 struct ksm_stable_node **s_n,
+						 struct rb_root *root)
 {
 	return __stable_node_chain(s_n_d, s_n, root, true);
 }
 
-static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
-					  struct ksm_stable_node *s_n,
-					  struct rb_root *root)
+static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
+					   struct ksm_stable_node *s_n,
+					   struct rb_root *root)
 {
 	struct ksm_stable_node *old_stable_node = s_n;
-	struct page *tree_page;
+	struct folio *tree_folio;
 
-	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
+	tree_folio = __stable_node_chain(s_n_d, &s_n, root, false);
 	/* not pruning dups so s_n cannot have changed */
 	VM_BUG_ON(s_n != old_stable_node);
-	return tree_page;
+	return tree_folio;
 }
 
 /*
@@ -1837,28 +1829,30 @@ static struct page *stable_tree_search(struct page *page)
 	struct rb_node *parent;
 	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
 	struct ksm_stable_node *page_node;
+	struct folio *folio;
 
-	page_node = page_stable_node(page);
+	folio = page_folio(page);
+	page_node = folio_stable_node(folio);
 	if (page_node && page_node->head != &migrate_nodes) {
 		/* ksm page forked */
-		get_page(page);
-		return page;
+		folio_get(folio);
+		return &folio->page;
 	}
 
-	nid = get_kpfn_nid(page_to_pfn(page));
+	nid = get_kpfn_nid(folio_pfn(folio));
 	root = root_stable_tree + nid;
 again:
 	new = &root->rb_node;
 	parent = NULL;
 
 	while (*new) {
-		struct page *tree_page;
+		struct folio *tree_folio;
 		int ret;
 
 		cond_resched();
 		stable_node = rb_entry(*new, struct ksm_stable_node, node);
 		stable_node_any = NULL;
-		tree_page = chain_prune(&stable_node_dup, &stable_node, root);
+		tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
 		/*
 		 * NOTE: stable_node may have been freed by
 		 * chain_prune() if the returned stable_node_dup is
@@ -1892,14 +1886,14 @@
 			 * write protected at all times. Any will work
 			 * fine to continue the walk.
 			 */
-			tree_page = get_ksm_page(stable_node_any,
-						 GET_KSM_PAGE_NOLOCK);
+			tree_folio = ksm_get_folio(stable_node_any,
+						   GET_KSM_PAGE_NOLOCK);
 		}
 		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
-		if (!tree_page) {
+		if (!tree_folio) {
 			/*
 			 * If we walked over a stale stable_node,
-			 * get_ksm_page() will call rb_erase() and it
+			 * ksm_get_folio() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
@@ -1909,8 +1903,8 @@ again:
 			goto again;
 		}
 
-		ret = memcmp_pages(page, tree_page);
-		put_page(tree_page);
+		ret = memcmp_pages(page, &tree_folio->page);
+		folio_put(tree_folio);
 
 		parent = *new;
 		if (ret < 0)
@@ -1953,26 +1947,26 @@ again:
 			 * It would be more elegant to return stable_node
 			 * than kpage, but that involves more changes.
 			 */
-			tree_page = get_ksm_page(stable_node_dup,
-						 GET_KSM_PAGE_TRYLOCK);
+			tree_folio = ksm_get_folio(stable_node_dup,
+						   GET_KSM_PAGE_TRYLOCK);
 
-			if (PTR_ERR(tree_page) == -EBUSY)
+			if (PTR_ERR(tree_folio) == -EBUSY)
 				return ERR_PTR(-EBUSY);
 
-			if (unlikely(!tree_page))
+			if (unlikely(!tree_folio))
 				/*
 				 * The tree may have been rebalanced,
 				 * so re-evaluate parent and new.
 				 */
 				goto again;
 
-			unlock_page(tree_page);
+			folio_unlock(tree_folio);
 
 			if (get_kpfn_nid(stable_node_dup->kpfn) !=
 			    NUMA(stable_node_dup->nid)) {
-				put_page(tree_page);
+				folio_put(tree_folio);
 				goto replace;
 			}
-			return tree_page;
+			return &tree_folio->page;
 		}
 	}
@@ -1985,8 +1979,8 @@ again:
 	rb_insert_color(&page_node->node, root);
 out:
 	if (is_page_sharing_candidate(page_node)) {
-		get_page(page);
-		return page;
+		folio_get(folio);
+		return &folio->page;
 	} else
 		return NULL;
 
@@ -2011,12 +2005,12 @@ replace:
 			rb_replace_node(&stable_node_dup->node,
 					&page_node->node, root);
 			if (is_page_sharing_candidate(page_node))
-				get_page(page);
+				folio_get(folio);
 			else
-				page = NULL;
+				folio = NULL;
 		} else {
 			rb_erase(&stable_node_dup->node, root);
-			page = NULL;
+			folio = NULL;
 		}
 	} else {
 		VM_BUG_ON(!is_stable_node_chain(stable_node));
@@ -2027,16 +2021,16 @@
 			DO_NUMA(page_node->nid = nid);
 			stable_node_chain_add_dup(page_node, stable_node);
 			if (is_page_sharing_candidate(page_node))
-				get_page(page);
+				folio_get(folio);
 			else
-				page = NULL;
+				folio = NULL;
 		} else {
-			page = NULL;
+			folio = NULL;
 		}
 	}
 	stable_node_dup->head = &migrate_nodes;
 	list_add(&stable_node_dup->list, stable_node_dup->head);
-	return page;
+	return &folio->page;
 
 chain_append:
 	/* stable_node_dup could be null if it reached the limit */
@@ -2079,7 +2073,7 @@
  * This function returns the stable tree node just allocated on success,
  * NULL otherwise.
  */
-static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
+static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
 {
 	int nid;
 	unsigned long kpfn;
@@ -2089,7 +2083,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
 	struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
 	bool need_chain = false;
 
-	kpfn = page_to_pfn(kpage);
+	kpfn = folio_pfn(kfolio);
 	nid = get_kpfn_nid(kpfn);
 	root = root_stable_tree + nid;
 again:
@@ -2097,13 +2091,13 @@
 	new = &root->rb_node;
 
 	while (*new) {
-		struct page *tree_page;
+		struct folio *tree_folio;
 		int ret;
 
 		cond_resched();
 		stable_node = rb_entry(*new, struct ksm_stable_node, node);
 		stable_node_any = NULL;
-		tree_page = chain(&stable_node_dup, stable_node, root);
+		tree_folio = chain(&stable_node_dup, stable_node, root);
 		if (!stable_node_dup) {
 			/*
 			 * Either all stable_node dups were full in
@@ -2125,14 +2119,14 @@
 			 * write protected at all times. Any will work
 			 * fine to continue the walk.
 			 */
-			tree_page = get_ksm_page(stable_node_any,
-						 GET_KSM_PAGE_NOLOCK);
+			tree_folio = ksm_get_folio(stable_node_any,
+						   GET_KSM_PAGE_NOLOCK);
 		}
 		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
-		if (!tree_page) {
+		if (!tree_folio) {
 			/*
 			 * If we walked over a stale stable_node,
-			 * get_ksm_page() will call rb_erase() and it
+			 * ksm_get_folio() will call rb_erase() and it
 			 * may rebalance the tree from under us. So
 			 * restart the search from scratch. Returning
 			 * NULL would be safe too, but we'd generate
@@ -2142,8 +2136,8 @@ again:
 			goto again;
 		}
 
-		ret = memcmp_pages(kpage, tree_page);
-		put_page(tree_page);
+		ret = memcmp_pages(&kfolio->page, &tree_folio->page);
+		folio_put(tree_folio);
 
 		parent = *new;
 		if (ret < 0)
@@ -2162,7 +2156,7 @@ again:
 
 	INIT_HLIST_HEAD(&stable_node_dup->hlist);
 	stable_node_dup->kpfn = kpfn;
-	set_page_stable_node(kpage, stable_node_dup);
+	folio_set_stable_node(kfolio, stable_node_dup);
 	stable_node_dup->rmap_hlist_len = 0;
 	DO_NUMA(stable_node_dup->nid = nid);
 	if (!need_chain) {
@@ -2440,7 +2434,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 		 * node in the stable tree and add both rmap_items.
 		 */
 		lock_page(kpage);
-		stable_node = stable_tree_insert(kpage);
+		stable_node = stable_tree_insert(page_folio(kpage));
 		if (stable_node) {
 			stable_tree_append(tree_rmap_item, stable_node,
 					   false);
@@ -3244,7 +3238,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 		/*
 		 * newfolio->mapping was set in advance; now we need smp_wmb()
 		 * to make sure that the new stable_node->kpfn is visible
-		 * to get_ksm_page() before it can see that folio->mapping
+		 * to ksm_get_folio() before it can see that folio->mapping
 		 * has gone stale (or that folio_test_swapcache has been cleared).
 		 */
 		smp_wmb();
@@ -3271,7 +3265,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
 	if (stable_node->kpfn >= start_pfn &&
 	    stable_node->kpfn < end_pfn) {
 		/*
-		 * Don't get_ksm_page, page has already gone:
+		 * Don't ksm_get_folio, page has already gone:
 		 * which is why we keep kpfn instead of page*
 		 */
 		remove_node_from_stable_tree(stable_node);
@@ -3359,7 +3353,7 @@ static int ksm_memory_callback(struct notifier_block *self,
 		 * Most of the work is done by page migration; but there might
 		 * be a few stable_nodes left over, still pointing to struct
 		 * pages which have been offlined: prune those from the tree,
-		 * otherwise get_ksm_page() might later try to access a
+		 * otherwise ksm_get_folio() might later try to access a
 		 * non-existent struct page.
 		 */
 		ksm_check_stable_tree(mn->start_pfn,
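The whole patch follows the kernel's standard page/folio interop idiom: map a struct page to its folio with page_folio(), take and drop references with folio_get()/folio_put() instead of get_page()/put_page(), and step back to a struct page only at the API boundary via &folio->page. A minimal sketch of that idiom (example_bridge() is a hypothetical name used for illustration only, not something this patch adds; page_folio(), folio_get() and struct folio are the real <linux/mm.h> interfaces):

static struct page *example_bridge(struct page *page)
{
	struct folio *folio = page_folio(page);	/* page -> containing folio */

	folio_get(folio);			/* replaces get_page(page) */

	/*
	 * folio -> head page for struct-page-based callers. Since page
	 * is the first member of struct folio, a NULL folio also maps
	 * to a NULL page, which is what the patched stable_tree_search()
	 * relies on when it ends with "return &folio->page;" on paths
	 * that set folio = NULL.
	 */
	return &folio->page;
}

This is why stable_tree_search() can keep returning struct page * to its callers while the tree walk itself, like stable_tree_insert(), now operates on folios end to end.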