From 6e919717c82c5773ac671816c8392c70d261685f Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 21 Sep 2009 17:03:32 -0700
Subject: mm: m(un)lock avoid ZERO_PAGE

I'm still reluctant to clutter __get_user_pages() with another flag,
just to avoid touching ZERO_PAGE count in mlock(); though we can add
that later if it shows up as an issue in practice.

But when mlocking, we can test page->mapping slightly earlier, to avoid
the potentially bouncy rescheduling of lock_page on ZERO_PAGE - mlock
didn't lock_page in olden ZERO_PAGE days, so we might have regressed.

And when munlocking, it turns out that FOLL_DUMP coincidentally does
what's needed to avoid all updates to ZERO_PAGE, so use that here also.
Plus add comment suggested by KAMEZAWA Hiroyuki.

Signed-off-by: Hugh Dickins
Cc: Rik van Riel
Cc: KAMEZAWA Hiroyuki
Cc: Nick Piggin
Acked-by: Mel Gorman
Cc: Minchan Kim
Reviewed-by: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mlock.c | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)

diff --git a/mm/mlock.c b/mm/mlock.c
index 22041aa9f5c1..bd6f0e466f6c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -198,17 +198,26 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	for (i = 0; i < ret; i++) {
 		struct page *page = pages[i];
 
-		lock_page(page);
-		/*
-		 * Because we lock page here and migration is blocked
-		 * by the elevated reference, we need only check for
-		 * file-cache page truncation.  This page->mapping
-		 * check also neatly skips over the ZERO_PAGE(),
-		 * though if that's common we'd prefer not to lock it.
-		 */
-		if (page->mapping)
-			mlock_vma_page(page);
-		unlock_page(page);
+		if (page->mapping) {
+			/*
+			 * That preliminary check is mainly to avoid
+			 * the pointless overhead of lock_page on the
+			 * ZERO_PAGE: which might bounce very badly if
+			 * there is contention.  However, we're still
+			 * dirtying its cacheline with get/put_page:
+			 * we'll add another __get_user_pages flag to
+			 * avoid it if that case turns out to matter.
+			 */
+			lock_page(page);
+			/*
+			 * Because we lock page here and migration is
+			 * blocked by the elevated reference, we need
+			 * only check for file-cache page truncation.
+			 */
+			if (page->mapping)
+				mlock_vma_page(page);
+			unlock_page(page);
+		}
 		put_page(page);	/* ref from get_user_pages() */
 	}
 
@@ -309,9 +318,23 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 	vma->vm_flags &= ~VM_LOCKED;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		struct page *page = follow_page(vma, addr, FOLL_GET);
-		if (page) {
+		struct page *page;
+		/*
+		 * Although FOLL_DUMP is intended for get_dump_page(),
+		 * it just so happens that its special treatment of the
+		 * ZERO_PAGE (returning an error instead of doing get_page)
+		 * suits munlock very well (and if somehow an abnormal page
+		 * has sneaked into the range, we won't oops here: great).
+		 */
+		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		if (page && !IS_ERR(page)) {
 			lock_page(page);
+			/*
+			 * Like in __mlock_vma_pages_range(),
+			 * because we lock page here and migration is
+			 * blocked by the elevated reference, we need
+			 * only check for file-cache page truncation.
+			 */
 			if (page->mapping)
 				munlock_vma_page(page);
 			unlock_page(page);
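
As a side note for anyone wanting to exercise both paths touched here:
below is a minimal userspace sketch (not part of the patch; the file
name and sizes are illustrative, and it assumes a kernel with this
ZERO_PAGE series applied). With ZERO_PAGE reinstated for anonymous
read faults, a read-only anonymous mapping gives an mlocked range
backed entirely by ZERO_PAGEs: mlock() then runs the new page->mapping
pre-check in __mlock_vma_pages_range(), and munlock() hits the
FOLL_DUMP early-return in munlock_vma_pages_range().

/*
 * zeropage_mlock.c - illustrative only, not part of this patch.
 * Build with: gcc -std=c99 -o zeropage_mlock zeropage_mlock.c
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * (size_t)page;	/* small: stay under RLIMIT_MEMLOCK */

	/* Read-only anonymous mapping: read faults install the ZERO_PAGE. */
	char *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * mlock() faults the range in via __mlock_vma_pages_range():
	 * each ZERO_PAGE fails the new page->mapping pre-check, so it
	 * is never lock_page'd (though still get/put_page'd).
	 */
	if (mlock(p, len)) {
		perror("mlock");
		return 1;
	}

	/*
	 * munlock() walks munlock_vma_pages_range(): FOLL_DUMP makes
	 * follow_page() return an error for the ZERO_PAGE, so munlock
	 * never takes a reference on it or locks it at all.
	 */
	if (munlock(p, len)) {
		perror("munlock");
		return 1;
	}

	munmap(p, len);
	printf("mlock/munlock over %zu ZERO_PAGE-backed bytes: ok\n", len);
	return 0;
}

Nothing here can prove the refcount behaviour from userspace, of
course; the point is just that such a range is exactly the
contended-ZERO_PAGE case the pre-check and the FOLL_DUMP trick
are aimed at.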