| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-19 20:00:06 -0700 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-19 20:00:06 -0700 |
| commit    | a05a70db34ba24ca009e1c9cedaef26fd17d5470 (patch) | |
| tree      | d5d8d0c80293bed52f2103ccc56a9e09117dc983 /mm/hugetlb.c | |
| parent    | 03b979dd0323ace8e29a0561cd5232f73a060c09 (diff) | |
| parent    | 4741526b83c5d3a3d661d1896f9e7414c5730bcb (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- fsnotify fix
- poll() timeout fix
- a few scripts/ tweaks
- debugobjects updates
- the (small) ocfs2 queue
- Minor fixes to kernel/padata.c
- Maybe half of the MM queue
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
mm, page_alloc: restore the original nodemask if the fast path allocation failed
mm, page_alloc: uninline the bad page part of check_new_page()
mm, page_alloc: don't duplicate code in free_pcp_prepare
mm, page_alloc: defer debugging checks of pages allocated from the PCP
mm, page_alloc: defer debugging checks of freed pages until a PCP drain
cpuset: use static key better and convert to new API
mm, page_alloc: inline pageblock lookup in page free fast paths
mm, page_alloc: remove unnecessary variable from free_pcppages_bulk
mm, page_alloc: pull out side effects from free_pages_check
mm, page_alloc: un-inline the bad part of free_pages_check
mm, page_alloc: check multiple page fields with a single branch
mm, page_alloc: remove field from alloc_context
mm, page_alloc: avoid looking up the first zone in a zonelist twice
mm, page_alloc: shortcut watermark checks for order-0 pages
mm, page_alloc: reduce cost of fair zone allocation policy retry
mm, page_alloc: shorten the page allocator fast path
mm, page_alloc: check once if a zone has isolated pageblocks
mm, page_alloc: move __GFP_HARDWALL modifications out of the fastpath
mm, page_alloc: simplify last cpupid reset
mm, page_alloc: remove unnecessary initialisation from __alloc_pages_nodemask()
...
Diffstat (limited to 'mm/hugetlb.c')
| -rw-r--r-- | mm/hugetlb.c | 37 |

1 file changed, 26 insertions(+), 11 deletions(-)
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 19d0d08b396f..949d80609a32 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -51,6 +51,7 @@ __initdata LIST_HEAD(huge_boot_pages);
 static struct hstate * __initdata parsed_hstate;
 static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
+static bool __initdata parsed_valid_hugepagesz = true;
 
 /*
  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
@@ -144,7 +145,8 @@ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 		}
 	}
 
-	if (spool->min_hpages != -1) {		/* minimum size accounting */
+	/* minimum size accounting */
+	if (spool->min_hpages != -1 && spool->rsv_hpages) {
 		if (delta > spool->rsv_hpages) {
 			/*
 			 * Asking for more reserves than those already taken on
@@ -182,7 +184,8 @@ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
 	if (spool->max_hpages != -1)		/* maximum size accounting */
 		spool->used_hpages -= delta;
 
-	if (spool->min_hpages != -1) {		/* minimum size accounting */
+	/* minimum size accounting */
+	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
 		if (spool->rsv_hpages + delta <= spool->min_hpages)
 			ret = 0;
 		else
@@ -937,9 +940,7 @@ err:
  */
 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 {
-	nid = next_node(nid, *nodes_allowed);
-	if (nid == MAX_NUMNODES)
-		nid = first_node(*nodes_allowed);
+	nid = next_node_in(nid, *nodes_allowed);
 	VM_BUG_ON(nid >= MAX_NUMNODES);
 
 	return nid;
@@ -1030,8 +1031,8 @@ static int __alloc_gigantic_page(unsigned long start_pfn,
 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 }
 
-static bool pfn_range_valid_gigantic(unsigned long start_pfn,
-				unsigned long nr_pages)
+static bool pfn_range_valid_gigantic(struct zone *z,
+			unsigned long start_pfn, unsigned long nr_pages)
 {
 	unsigned long i, end_pfn = start_pfn + nr_pages;
 	struct page *page;
@@ -1042,6 +1043,9 @@ static bool pfn_range_valid_gigantic(unsigned long start_pfn,
 
 		page = pfn_to_page(i);
 
+		if (page_zone(page) != z)
+			return false;
+
 		if (PageReserved(page))
 			return false;
 
@@ -1074,7 +1078,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned int order)
 
 		pfn = ALIGN(z->zone_start_pfn, nr_pages);
 		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
-			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
+			if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
 				/*
 				 * We release the zone lock here because
 				 * alloc_contig_range() will also lock the zone
@@ -2659,6 +2663,11 @@ static int __init hugetlb_init(void)
 subsys_initcall(hugetlb_init);
 
 /* Should be called on processing a hugepagesz=... option */
+void __init hugetlb_bad_size(void)
+{
+	parsed_valid_hugepagesz = false;
+}
+
 void __init hugetlb_add_hstate(unsigned int order)
 {
 	struct hstate *h;
@@ -2678,8 +2687,8 @@ void __init hugetlb_add_hstate(unsigned int order)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
 	INIT_LIST_HEAD(&h->hugepage_activelist);
-	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
-	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
+	h->next_nid_to_alloc = first_memory_node;
+	h->next_nid_to_free = first_memory_node;
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 
@@ -2691,11 +2700,17 @@ static int __init hugetlb_nrpages_setup(char *s)
 	unsigned long *mhp;
 	static unsigned long *last_mhp;
 
+	if (!parsed_valid_hugepagesz) {
+		pr_warn("hugepages = %s preceded by "
+			"an unsupported hugepagesz, ignoring\n", s);
+		parsed_valid_hugepagesz = true;
+		return 1;
+	}
 	/*
 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
 	 * so this hugepages= parameter goes to the "default hstate".
 	 */
-	if (!hugetlb_max_hstate)
+	else if (!hugetlb_max_hstate)
 		mhp = &default_hstate_max_huge_pages;
 	else
 		mhp = &parsed_hstate->max_huge_pages;
```
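The next_node_allowed() hunk is a pure cleanup: the open-coded "advance, then wrap to the first node" sequence is folded into the next_node_in() helper, which has the same semantics. As a word of context, here is a minimal userspace sketch of that wrap-around behaviour; the nodemask is modeled as a plain bitmap and the helper names mirror the kernel's, but this is an illustration, not the kernel implementation.

```c
/*
 * Userspace sketch (not kernel code) of the wrap-around that
 * next_node_in() encapsulates.  MAX_NUMNODES and the nodemask are
 * modeled with an unsigned long bitmap for illustration.
 */
#include <stdio.h>

#define MAX_NUMNODES 64

/* First set bit at a position > nid, or MAX_NUMNODES if none. */
static int next_node(int nid, unsigned long mask)
{
	for (int i = nid + 1; i < MAX_NUMNODES; i++)
		if (mask & (1UL << i))
			return i;
	return MAX_NUMNODES;
}

/* First set bit overall. */
static int first_node(unsigned long mask)
{
	return next_node(-1, mask);
}

/* Equivalent of the open-coded pattern the patch removes. */
static int next_node_in(int nid, unsigned long mask)
{
	int next = next_node(nid, mask);

	if (next == MAX_NUMNODES)
		next = first_node(mask);
	return next;
}

int main(void)
{
	unsigned long allowed = (1UL << 0) | (1UL << 2) | (1UL << 5);

	/* Round-robins over the allowed nodes: 0 -> 2 -> 5 -> 0 -> ... */
	for (int nid = 0, i = 0; i < 6; i++) {
		printf("%d ", nid);
		nid = next_node_in(nid, allowed);
	}
	printf("\n");	/* prints: 0 2 5 0 2 5 */
	return 0;
}
```

The two removed lines in the diff are exactly the body of next_node_in() here, which is why the surrounding VM_BUG_ON() check is unchanged: the round-robin walk over next_nid_to_alloc/next_nid_to_free still never yields an out-of-range node id.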
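The parsed_valid_hugepagesz/hugetlb_bad_size() pair changes how the boot command line is parsed: a hugepages= count that follows a hugepagesz= value the architecture rejected is now warned about and skipped, instead of being silently applied to the default or previously parsed hstate. Below is a minimal userspace sketch of that ordering rule; parse_hugepagesz() is a hypothetical stand-in for the arch-specific setup_hugepagesz() handler, and the size strings are illustrative values, not a statement of which sizes any architecture supports.

```c
/*
 * Userspace sketch (not kernel code) of the hugepagesz=/hugepages=
 * ordering rule the patch introduces.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool parsed_valid_hugepagesz = true;

/* Mirrors hugetlb_bad_size(): arch code calls this on a bad size. */
static void hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}

/* Hypothetical arch hook: accept only "2M" and "1G" for this demo. */
static void parse_hugepagesz(const char *s)
{
	if (strcmp(s, "2M") && strcmp(s, "1G"))
		hugetlb_bad_size();
}

/* Mirrors the new check at the top of hugetlb_nrpages_setup(). */
static void parse_hugepages(const char *s)
{
	if (!parsed_valid_hugepagesz) {
		printf("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;	/* reset for the next pair */
		return;
	}
	printf("pool size set to %s pages\n", s);
}

int main(void)
{
	/* As if booted with: hugepagesz=1664k hugepages=2 hugepagesz=2M hugepages=512 */
	parse_hugepagesz("1664k");
	parse_hugepages("2");	/* warned about and ignored */
	parse_hugepagesz("2M");
	parse_hugepages("512");	/* applied normally */
	return 0;
}
```

Note that the flag is reset to true after the warning, the same reset the patch performs in hugetlb_nrpages_setup(), so one bad hugepagesz=/hugepages= pair does not poison later, valid pairs on the same command line.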