| author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2017-09-08 16:12:59 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-08 18:26:47 -0700 |
| commit | 9472f23c9eeba3b32e65a62fe2a9b3e827888afa | |
| tree | 89192d7497a5b90f8b232737df2737489f29d1f3 /mm/mlock.c | |
| parent | 638032224ed762a29baca1fc37f1168efc2554ae | |
mm/mlock.c: use page_zone() instead of page_zone_id()
page_zone_id() is a specialized function for comparing the zone of pages
that lie within the same section. If the pages belong to different
sections, page_zone_id() can differ even when their zone is the same.
This incorrect usage doesn't cause any actual problem, since
__munlock_pagevec_fill() would simply be called again with the failed index.
Still, it's better to use the more appropriate function here.
Link: http://lkml.kernel.org/r/1503559211-10259-1-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
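The distinction the commit message draws is easiest to see with a small stand-alone model. The sketch below is not the kernel's code: `struct toy_page`, the macros, and the bit widths are made-up stand-ins for the id that page_zone_id() computes from page->flags, which can include section bits alongside the zone bits (the situation the commit message describes). It only illustrates why an id that mixes section bits into the comparison can differ for two pages sitting in the same zone, while comparing the zone itself cannot.

```c
/*
 * Toy userspace model -- NOT the kernel's real code. Names, bit widths and
 * the flags layout are simplified stand-ins for the SECTION:ZONE style
 * encoding that page_zone_id() reads out of page->flags.
 */
#include <stdio.h>

#define ZONES_SHIFT    2                 /* toy: 4 possible zones     */
#define SECTIONS_SHIFT 4                 /* toy: 16 possible sections */
#define ZONES_MASK     ((1UL << ZONES_SHIFT) - 1)
#define ZONEID_MASK    ((1UL << (ZONES_SHIFT + SECTIONS_SHIFT)) - 1)

struct toy_page { unsigned long flags; };    /* [ ... | section | zone ] */

static unsigned long make_flags(unsigned long section, unsigned long zone)
{
	return (section << ZONES_SHIFT) | zone;
}

/* Analogue of page_zone(): only the zone matters. */
static unsigned long toy_page_zone(const struct toy_page *page)
{
	return page->flags & ZONES_MASK;
}

/* Analogue of page_zone_id(): section and zone bits are compared together. */
static unsigned long toy_page_zone_id(const struct toy_page *page)
{
	return page->flags & ZONEID_MASK;
}

int main(void)
{
	/* Two pages in the same zone (1) but in different memory sections. */
	struct toy_page a = { .flags = make_flags(3, 1) };
	struct toy_page b = { .flags = make_flags(4, 1) };

	printf("zone match:    %d\n", toy_page_zone(&a) == toy_page_zone(&b));       /* 1 */
	printf("zone_id match: %d\n", toy_page_zone_id(&a) == toy_page_zone_id(&b)); /* 0 */
	return 0;
}
```

With the fix below, __munlock_pagevec_fill() compares struct zone pointers directly, so pages from the same zone are accepted regardless of which memory section they fall in.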
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 10 |
1 file changed, 4 insertions, 6 deletions
```diff
diff --git a/mm/mlock.c b/mm/mlock.c
index b562b5523a65..dfc6f1912176 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -365,8 +365,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
  * @start + PAGE_SIZE when no page could be added by the pte walk.
  */
 static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
-		struct vm_area_struct *vma, int zoneid, unsigned long start,
-		unsigned long end)
+		struct vm_area_struct *vma, struct zone *zone,
+		unsigned long start, unsigned long end)
 {
 	pte_t *pte;
 	spinlock_t *ptl;
@@ -394,7 +394,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 		 * Break if page could not be obtained or the page's node+zone does not
 		 * match
 		 */
-		if (!page || page_zone_id(page) != zoneid)
+		if (!page || page_zone(page) != zone)
 			break;

 		/*
@@ -446,7 +446,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
-		int zoneid;

 		pagevec_init(&pvec, 0);
 		/*
@@ -481,7 +480,6 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 			 */
 			pagevec_add(&pvec, page);
 			zone = page_zone(page);
-			zoneid = page_zone_id(page);

 			/*
 			 * Try to fill the rest of pagevec using fast
@@ -490,7 +488,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 			 * pagevec.
 			 */
 			start = __munlock_pagevec_fill(&pvec, vma,
-					zoneid, start, end);
+					zone, start, end);
 			__munlock_pagevec(&pvec, zone);
 			goto next;
 		}
```