Diffstat (limited to 'mm/gup.c')
-rw-r--r--  mm/gup.c  384
1 file changed, 183 insertions, 201 deletions
@@ -111,7 +111,7 @@ retry:
  *    FOLL_GET: folio's refcount will be incremented by @refs.
  *
  *    FOLL_PIN on large folios: folio's refcount will be incremented by
- *    @refs, and its compound_pincount will be incremented by @refs.
+ *    @refs, and its pincount will be incremented by @refs.
  *
  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
  *    @refs * GUP_PIN_COUNTING_BIAS.
@@ -157,7 +157,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 		 * try_get_folio() is left intact.
 		 */
 		if (folio_test_large(folio))
-			atomic_add(refs, folio_pincount_ptr(folio));
+			atomic_add(refs, &folio->_pincount);
 		else
 			folio_ref_add(folio,
 					refs * (GUP_PIN_COUNTING_BIAS - 1));
@@ -182,7 +182,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 	if (flags & FOLL_PIN) {
 		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
 		if (folio_test_large(folio))
-			atomic_sub(refs, folio_pincount_ptr(folio));
+			atomic_sub(refs, &folio->_pincount);
 		else
 			refs *= GUP_PIN_COUNTING_BIAS;
 	}
@@ -215,7 +215,6 @@ int __must_check try_grab_page(struct page *page, unsigned int flags)
 {
 	struct folio *folio = page_folio(page);
 
-	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
 	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
 		return -ENOMEM;
 
@@ -232,7 +231,7 @@ int __must_check try_grab_page(struct page *page, unsigned int flags)
 		 */
 		if (folio_test_large(folio)) {
 			folio_ref_add(folio, 1);
-			atomic_add(1, folio_pincount_ptr(folio));
+			atomic_add(1, &folio->_pincount);
 		} else {
 			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
 		}
@@ -818,7 +817,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	if (vma_is_secretmem(vma))
 		return NULL;
 
-	if (foll_flags & FOLL_PIN)
+	if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
 		return NULL;
 
 	page = follow_page_mask(vma, address, foll_flags, &ctx);
@@ -880,9 +879,9 @@ unmap:
 }
 
 /*
- * mmap_lock must be held on entry. If @locked != NULL and *@flags
- * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
- * is, *@locked will be set to 0 and -EBUSY returned.
+ * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
+ * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
+ * to 0 and -EBUSY returned.
  */
 static int faultin_page(struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, bool unshare,
@@ -897,7 +896,7 @@ static int faultin_page(struct vm_area_struct *vma,
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (*flags & FOLL_REMOTE)
 		fault_flags |= FAULT_FLAG_REMOTE;
-	if (locked) {
+	if (*flags & FOLL_UNLOCKABLE) {
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 		/*
 		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
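The hunks above replace the old compound_pincount accessor with the folio's _pincount field while keeping GUP's dual pin-accounting scheme. As a reading aid, here is a hedged sketch of how the two schemes are queried back; it mirrors the upstream folio_maybe_dma_pinned() helper, but the demo_ name and placement are illustrative only:

#include <linux/mm.h>

/* Illustrative sketch only; see folio_maybe_dma_pinned() for the real check. */
static inline bool demo_folio_is_pinned(struct folio *folio)
{
	if (folio_test_large(folio))
		/* Large folios count pins exactly, in folio->_pincount. */
		return atomic_read(&folio->_pincount) > 0;

	/*
	 * Single-page folios fold pins into the refcount in units of
	 * GUP_PIN_COUNTING_BIAS, so this check is necessarily heuristic.
	 */
	return folio_ref_count(folio) >= GUP_PIN_COUNTING_BIAS;
}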
@@ -931,8 +930,8 @@
 		 * mmap lock in the page fault handler. Sanity check this.
 		 */
 		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
-		if (locked)
-			*locked = 0;
+		*locked = 0;
+
 		/*
 		 * We should do the same as VM_FAULT_RETRY, but let's not
 		 * return -EBUSY since that's not reflecting the reality of
@@ -952,7 +951,7 @@
 	}
 
 	if (ret & VM_FAULT_RETRY) {
-		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
+		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 			*locked = 0;
 		return -EBUSY;
 	}
@@ -975,9 +974,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
 		return -EOPNOTSUPP;
 
-	if ((gup_flags & FOLL_LONGTERM) && (gup_flags & FOLL_PCI_P2PDMA))
-		return -EOPNOTSUPP;
-
 	if (vma_is_secretmem(vma))
 		return -EFAULT;
@@ -1055,7 +1051,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * This does not guarantee that the page exists in the user mappings when
  * __get_user_pages returns, and there may even be a completely different
  * page there in some cases (eg. if mmapped pagecache has been invalidated
- * and subsequently re faulted). However it does guarantee that the page
+ * and subsequently re-faulted). However it does guarantee that the page
  * won't be freed completely. And mostly callers simply care that the page
  * contains data that was valid *at some point in time*. Typically, an IO
  * or similar operation cannot guarantee anything stronger anyway because
@@ -1066,14 +1062,12 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * appropriate) must be called after the page is finished with, and
  * before put_page is called.
  *
- * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
- * released by an up_read(). That can happen if @gup_flags does not
- * have FOLL_NOWAIT.
+ * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
+ * be released. If this happens *@locked will be set to 0 on return.
  *
- * A caller using such a combination of @locked and @gup_flags
- * must therefore hold the mmap_lock for reading only, and recognize
- * when it's been released. Otherwise, it must be held for either
- * reading or writing and will not be released.
+ * A caller using such a combination of @gup_flags must therefore hold the
+ * mmap_lock for reading only, and recognize when it's been released. Otherwise,
+ * it must be held for either reading or writing and will not be released.
  *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
@@ -1125,7 +1119,7 @@ static long __get_user_pages(struct mm_struct *mm,
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
 					&start, &nr_pages, i,
 					gup_flags, locked);
-			if (locked && *locked == 0) {
+			if (!*locked) {
 				/*
 				 * We've got a VM_FAULT_RETRY
 				 * and we've lost mmap_lock.
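The faultin_page() and __get_user_pages() hunks above key retry/unlock behaviour off FOLL_UNLOCKABLE instead of a NULL-able @locked pointer. The caller-visible contract of the exported entry points is unchanged and, under the 7-argument get_user_pages_remote() signature this diff still targets, looks roughly like the following sketch; demo_gup_with_locked() is a hypothetical caller:

static long demo_gup_with_locked(struct mm_struct *mm, unsigned long addr,
				 struct page **page)
{
	int locked = 1;
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, page, NULL,
				    &locked);
	if (locked)	/* GUP may have dropped the lock and cleared this */
		mmap_read_unlock(mm);
	/* on success (ret == 1), release the reference with put_page() */
	return ret;
}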
@@ -1331,8 +1325,17 @@ static bool gup_signal_pending(unsigned int flags)
 }
 
 /*
- * Please note that this function, unlike __get_user_pages will not
- * return 0 for nr_pages > 0 without FOLL_NOWAIT
+ * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
+ * the caller. This function may drop the mmap_lock. If it does so, then it will
+ * set (*locked = 0).
+ *
+ * (*locked == 0) means that the caller expects this function to acquire and
+ * drop the mmap_lock. Therefore, the value of *locked will still be zero when
+ * the function returns, even though it may have changed temporarily during
+ * function execution.
+ *
+ * Please note that this function, unlike __get_user_pages(), will not return 0
+ * for nr_pages > 0, unless FOLL_NOWAIT is used.
 */
 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 						unsigned long start,
@@ -1343,14 +1346,20 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 						unsigned int flags)
 {
 	long ret, pages_done;
-	bool lock_dropped;
+	bool must_unlock = false;
 
-	if (locked) {
-		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
-		BUG_ON(vmas);
-		/* check caller initialized locked */
-		BUG_ON(*locked != 1);
+	/*
+	 * The internal caller expects GUP to manage the lock internally and the
+	 * lock must be released when this returns.
+	 */
+	if (!*locked) {
+		if (mmap_read_lock_killable(mm))
+			return -EAGAIN;
+		must_unlock = true;
+		*locked = 1;
 	}
+	else
+		mmap_assert_locked(mm);
 
 	if (flags & FOLL_PIN)
 		mm_set_has_pinned_flag(&mm->flags);
@@ -1368,13 +1377,14 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		flags |= FOLL_GET;
 
 	pages_done = 0;
-	lock_dropped = false;
 	for (;;) {
 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
 				       vmas, locked);
-		if (!locked)
+		if (!(flags & FOLL_UNLOCKABLE)) {
 			/* VM_FAULT_RETRY couldn't trigger, bypass */
-			return ret;
+			pages_done = ret;
+			break;
+		}
 
 		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
 		if (!*locked) {
@@ -1404,7 +1414,9 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
 		if (likely(pages))
 			pages += ret;
 		start += ret << PAGE_SHIFT;
-		lock_dropped = true;
+
+		/* The lock was temporarily dropped, so we must unlock later */
+		must_unlock = true;
 
 retry:
 		/*
@@ -1451,10 +1463,11 @@ retry:
 		pages++;
 		start += PAGE_SIZE;
 	}
-	if (lock_dropped && *locked) {
+	if (must_unlock && *locked) {
 		/*
-		 * We must let the caller know we temporarily dropped the lock
-		 * and so the critical section protected by it was lost.
+		 * We either temporarily dropped the lock, or the caller
+		 * requested that we both acquire and drop the lock. Either way,
+		 * we must now unlock, and notify the caller of that state.
 		 */
 		mmap_read_unlock(mm);
 		*locked = 0;
@@ -1487,6 +1500,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
+	int local_locked = 1;
 	int gup_flags;
 	long ret;
@@ -1519,12 +1533,15 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	if (vma_is_accessible(vma))
 		gup_flags |= FOLL_FORCE;
 
+	if (locked)
+		gup_flags |= FOLL_UNLOCKABLE;
+
 	/*
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
 	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-				NULL, NULL, locked);
+				NULL, NULL, locked ? locked : &local_locked);
 	lru_add_drain();
 	return ret;
 }
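The rewritten comment above defines two calling conventions for __get_user_pages_locked(). Since that helper is static to mm/gup.c, the following is only a sketch written as if from inside the file; the demo_* wrappers are hypothetical:

/* Mode 1: *locked == 1, the caller already holds mmap_lock for read. */
static long demo_caller_holds_lock(struct mm_struct *mm, unsigned long start,
				   struct page **pages)
{
	int locked = 1;
	long ret;

	mmap_read_lock(mm);
	ret = __get_user_pages_locked(mm, start, 1, pages, NULL, &locked,
				      FOLL_TOUCH | FOLL_UNLOCKABLE);
	if (locked)	/* cleared if GUP had to drop the lock */
		mmap_read_unlock(mm);
	return ret;
}

/* Mode 2: *locked == 0, GUP acquires and drops mmap_lock by itself. */
static long demo_gup_manages_lock(struct mm_struct *mm, unsigned long start,
				  struct page **pages)
{
	int locked = 0;
	long ret;

	ret = __get_user_pages_locked(mm, start, 1, pages, NULL, &locked,
				      FOLL_TOUCH);
	/* mmap_lock is not held here, and locked is still 0. */
	return ret;
}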
@@ -1545,12 +1562,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
  * code on error (see __get_user_pages()).
  *
  * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
- * covered by the VMA.
- *
- * If @locked is NULL, it may be held for read or write and will be unperturbed.
- *
- * If @locked is non-NULL, it must held for read only and may be released. If
- * it's released, *@locked will be set to 0.
+ * covered by the VMA. If it's released, *@locked will be set to 0.
 */
 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, bool write, int *locked)
@@ -1575,7 +1587,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 	 *	       a poisoned page.
 	 * !FOLL_FORCE: Require proper access permissions.
 	 */
-	gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
+	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
 	if (write)
 		gup_flags |= FOLL_WRITE;
@@ -1659,9 +1671,24 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		unsigned int foll_flags)
 {
 	struct vm_area_struct *vma;
+	bool must_unlock = false;
 	unsigned long vm_flags;
 	long i;
 
+	if (!nr_pages)
+		return 0;
+
+	/*
+	 * The internal caller expects GUP to manage the lock internally and the
+	 * lock must be released when this returns.
+	 */
+	if (!*locked) {
+		if (mmap_read_lock_killable(mm))
+			return -EAGAIN;
+		must_unlock = true;
+		*locked = 1;
+	}
+
 	/* calculate required read or write permissions.
 	 * If FOLL_FORCE is set, we only require the "MAY" flags. */
@@ -1673,12 +1700,12 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
-			goto finish_or_fault;
+			break;
 
 		/* protect what we can, including chardevs */
 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 		    !(vm_flags & vma->vm_flags))
-			goto finish_or_fault;
+			break;
 
 		if (pages) {
 			pages[i] = virt_to_page((void *)start);
@@ -1690,9 +1717,11 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		start = (start + PAGE_SIZE) & PAGE_MASK;
 	}
 
-	return i;
+	if (must_unlock && *locked) {
+		mmap_read_unlock(mm);
+		*locked = 0;
+	}
 
-finish_or_fault:
 	return i ? : -EFAULT;
 }
 #endif /* !CONFIG_MMU */
@@ -1861,17 +1890,13 @@ EXPORT_SYMBOL(fault_in_readable);
 #ifdef CONFIG_ELF_CORE
 struct page *get_dump_page(unsigned long addr)
 {
-	struct mm_struct *mm = current->mm;
 	struct page *page;
-	int locked = 1;
+	int locked = 0;
 	int ret;
 
-	if (mmap_read_lock_killable(mm))
-		return NULL;
-	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
+	ret = __get_user_pages_locked(current->mm, addr, 1, &page, NULL,
+				      &locked,
 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
-	if (locked)
-		mmap_read_unlock(mm);
 	return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
@@ -1905,7 +1930,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			continue;
 
 		if (folio_test_hugetlb(folio)) {
-			isolate_hugetlb(&folio->page, movable_page_list);
+			isolate_hugetlb(folio, movable_page_list);
 			continue;
 		}
@@ -1914,7 +1939,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			drain_allow = false;
 		}
 
-		if (folio_isolate_lru(folio))
+		if (!folio_isolate_lru(folio))
 			continue;
 
 		list_add_tail(&folio->lru, movable_page_list);
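The isolate_hugetlb() and folio_isolate_lru() hunks above track folio conversions made elsewhere in the tree: both helpers now take a folio and return bool, with true meaning the folio was successfully isolated. A hedged sketch of the resulting pattern; demo_try_isolate() is hypothetical, and since these helpers are internal to mm/ it is written as if inside it:

static void demo_try_isolate(struct folio *folio, struct list_head *list)
{
	if (folio_test_hugetlb(folio)) {
		/* isolate_hugetlb() moves the folio onto @list itself */
		isolate_hugetlb(folio, list);
		return;
	}
	if (folio_isolate_lru(folio))
		/* off the LRU now; the caller owns the list linkage */
		list_add_tail(&folio->lru, list);
}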
@@ -2047,34 +2072,15 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 					    int *locked,
 					    unsigned int gup_flags)
 {
-	bool must_unlock = false;
 	unsigned int flags;
 	long rc, nr_pinned_pages;
 
-	if (locked && WARN_ON_ONCE(!*locked))
-		return -EINVAL;
-
 	if (!(gup_flags & FOLL_LONGTERM))
 		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
 					       locked, gup_flags);
 
-	/*
-	 * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM
-	 * implies FOLL_PIN (although the reverse is not true). Therefore it is
-	 * correct to unconditionally call check_and_migrate_movable_pages()
-	 * which assumes pages have been pinned via FOLL_PIN.
-	 *
-	 * Enforce the above reasoning by asserting that FOLL_PIN is set.
-	 */
-	if (WARN_ON(!(gup_flags & FOLL_PIN)))
-		return -EINVAL;
 	flags = memalloc_pin_save();
 	do {
-		if (locked && !*locked) {
-			mmap_read_lock(mm);
-			must_unlock = true;
-			*locked = 1;
-		}
 		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
 							  pages, vmas, locked,
 							  gup_flags);
@@ -2082,33 +2088,70 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 			rc = nr_pinned_pages;
 			break;
 		}
+
+		/* FOLL_LONGTERM implies FOLL_PIN */
 		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
 	} while (rc == -EAGAIN);
 	memalloc_pin_restore(flags);
-
-	if (locked && *locked && must_unlock) {
-		mmap_read_unlock(mm);
-		*locked = 0;
-	}
 	return rc ? rc : nr_pinned_pages;
 }
 
-static bool is_valid_gup_flags(unsigned int gup_flags)
+/*
+ * Check that the given flags are valid for the exported gup/pup interface, and
+ * update them with the required flags that the caller must have set.
+ */
+static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas,
+			      int *locked, unsigned int *gup_flags_p,
+			      unsigned int to_set)
 {
+	unsigned int gup_flags = *gup_flags_p;
+
 	/*
-	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
-	 * never directly by the caller, so enforce that with an assertion:
+	 * These flags not allowed to be specified externally to the gup
+	 * interfaces:
+	 * - FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
+	 * - FOLL_REMOTE is internal only and used on follow_page()
+	 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
 	 */
-	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
+	if (WARN_ON_ONCE(gup_flags & (FOLL_PIN | FOLL_TRIED | FOLL_UNLOCKABLE |
+				      FOLL_REMOTE | FOLL_FAST_ONLY)))
 		return false;
+
+	gup_flags |= to_set;
+	if (locked) {
+		/* At the external interface locked must be set */
+		if (WARN_ON_ONCE(*locked != 1))
+			return false;
+
+		gup_flags |= FOLL_UNLOCKABLE;
+	}
+
+	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
+	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
+			 (FOLL_PIN | FOLL_GET)))
+		return false;
+
+	/* LONGTERM can only be specified when pinning */
+	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
+		return false;
+
+	/* Pages input must be given if using GET/PIN */
+	if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
+		return false;
+
+	/* We want to allow the pgmap to be hot-unplugged at all times */
+	if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
+			 (gup_flags & FOLL_PCI_P2PDMA)))
		return false;
+
 	/*
-	 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
-	 * that is, FOLL_LONGTERM is a specific case, more restrictive case of
-	 * FOLL_PIN.
+	 * Can't use VMAs with locked, as locked allows GUP to unlock
+	 * which invalidates the vmas array
 	 */
-	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+	if (WARN_ON_ONCE(vmas && (gup_flags & FOLL_UNLOCKABLE)))
 		return false;
 
+	*gup_flags_p = gup_flags;
 	return true;
 }
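is_valid_gup_args() centralizes the sanity checks that were previously scattered as per-wrapper WARN_ON_ONCE()s. Its invariants can be restated as a standalone sketch; demo_flags_consistent() is hypothetical and deliberately simplified (in the real helper the @locked/@vmas coupling runs through FOLL_UNLOCKABLE):

static bool demo_flags_consistent(unsigned int f, bool has_pages,
				  bool has_vmas, bool has_locked)
{
	if ((f & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))
		return false;	/* GET and PIN are mutually exclusive */
	if ((f & FOLL_LONGTERM) && !(f & FOLL_PIN))
		return false;	/* LONGTERM is only valid when pinning */
	if ((f & (FOLL_GET | FOLL_PIN)) && !has_pages)
		return false;	/* GET/PIN need a pages array to fill */
	if ((f & FOLL_LONGTERM) && (f & FOLL_PCI_P2PDMA))
		return false;	/* keep the pgmap hot-unpluggable */
	if (has_vmas && has_locked)
		return false;	/* unlocking would invalidate the vmas */
	return true;
}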
@@ -2178,11 +2221,15 @@ long get_user_pages_remote(struct mm_struct *mm,
 		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas, int *locked)
 {
-	if (!is_valid_gup_flags(gup_flags))
+	int local_locked = 1;
+
+	if (!is_valid_gup_args(pages, vmas, locked, &gup_flags,
+			       FOLL_TOUCH | FOLL_REMOTE))
 		return -EINVAL;
 
-	return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
-				     gup_flags | FOLL_TOUCH | FOLL_REMOTE);
+	return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+				       locked ? locked : &local_locked,
+				       gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
@@ -2216,11 +2263,13 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
-	if (!is_valid_gup_flags(gup_flags))
+	int locked = 1;
+
+	if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_TOUCH))
 		return -EINVAL;
 
-	return __gup_longterm_locked(current->mm, start, nr_pages,
-				     pages, vmas, NULL, gup_flags | FOLL_TOUCH);
+	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
+				       vmas, &locked, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages);
@@ -2242,16 +2291,14 @@ EXPORT_SYMBOL(get_user_pages);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	struct mm_struct *mm = current->mm;
-	int locked = 1;
-	long ret;
+	int locked = 0;
 
-	mmap_read_lock(mm);
-	ret = __gup_longterm_locked(mm, start, nr_pages, pages, NULL, &locked,
-				    gup_flags | FOLL_TOUCH);
-	if (locked)
-		mmap_read_unlock(mm);
-	return ret;
+	if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags,
+			       FOLL_TOUCH | FOLL_UNLOCKABLE))
+		return -EINVAL;
+
+	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
+				       NULL, &locked, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -2904,6 +2951,7 @@ static int internal_get_user_pages_fast(unsigned long start,
 {
 	unsigned long len, end;
 	unsigned long nr_pinned;
+	int locked = 0;
 	int ret;
 
 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
@@ -2932,8 +2980,9 @@ static int internal_get_user_pages_fast(unsigned long start,
 	/* Slow path: try to get the remaining pages with get_user_pages */
 	start += nr_pinned << PAGE_SHIFT;
 	pages += nr_pinned;
-	ret = get_user_pages_unlocked(start, nr_pages - nr_pinned, pages,
-				      gup_flags);
+	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
				    pages, NULL, &locked,
+				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
 	if (ret < 0) {
 		/*
 		 * The caller has to unpin the pages we already pinned so
@@ -2956,8 +3005,6 @@ static int internal_get_user_pages_fast(unsigned long start,
  *
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
  *
 * If the architecture does not support this function, simply return with no
 * pages pinned.
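With the hunks above, the slow-path fallback inside internal_get_user_pages_fast() calls __gup_longterm_locked() directly with FOLL_UNLOCKABLE rather than bouncing through get_user_pages_unlocked(). From a driver's point of view the fast API is unchanged; a minimal hypothetical caller (demo_read_user_page()) might look like:

static int demo_read_user_page(unsigned long uaddr)
{
	struct page *page;
	int ret;

	ret = get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	/* ... access the page contents ... */
	put_page(page);		/* fast GUP took a reference (FOLL_GET) */
	return 0;
}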
@@ -2969,7 +3016,6 @@
 int get_user_pages_fast_only(unsigned long start, int nr_pages,
 			     unsigned int gup_flags, struct page **pages)
 {
-	int nr_pinned;
 	/*
 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
 	 * because gup fast is always a "pin with a +1 page refcount" request.
 	 *
 	 * FOLL_FAST_ONLY is required in order to match the API description of
 	 * this routine: no fall back to regular ("slow") GUP.
 	 */
-	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
-
-	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
-						 pages);
-
-	/*
-	 * As specified in the API description above, this routine is not
-	 * allowed to return negative values. However, the common core
-	 * routine internal_get_user_pages_fast() *can* return -errno.
-	 * Therefore, correct for that here:
-	 */
-	if (nr_pinned < 0)
-		nr_pinned = 0;
+	if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags,
+			       FOLL_GET | FOLL_FAST_ONLY))
+		return -EINVAL;
 
-	return nr_pinned;
+	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
@@ -3014,16 +3050,14 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
 int get_user_pages_fast(unsigned long start, int nr_pages,
 			unsigned int gup_flags, struct page **pages)
 {
-	if (!is_valid_gup_flags(gup_flags))
-		return -EINVAL;
-
 	/*
 	 * The caller may or may not have explicitly set FOLL_GET; either way is
 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
 	 * request.
 	 */
-	gup_flags |= FOLL_GET;
+	if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_GET))
+		return -EINVAL;
+
 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
@@ -3047,57 +3081,12 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
 int pin_user_pages_fast(unsigned long start, int nr_pages,
 			unsigned int gup_flags, struct page **pages)
 {
-	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return -EINVAL;
-
-	if (WARN_ON_ONCE(!pages))
+	if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN))
 		return -EINVAL;
-
-	gup_flags |= FOLL_PIN;
 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
 }
 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
 
-/*
- * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
- * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
- *
- * The API rules are the same, too: no negative values may be returned.
- */
-int pin_user_pages_fast_only(unsigned long start, int nr_pages,
-			     unsigned int gup_flags, struct page **pages)
-{
-	int nr_pinned;
-
-	/*
-	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
-	 * rules require returning 0, rather than -errno:
-	 */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return 0;
-
-	if (WARN_ON_ONCE(!pages))
-		return 0;
-	/*
-	 * FOLL_FAST_ONLY is required in order to match the API description of
-	 * this routine: no fall back to regular ("slow") GUP.
-	 */
-	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
-	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
-						 pages);
-	/*
-	 * This routine is not allowed to return negative values. However,
-	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
-	 * correct for that here:
-	 */
-	if (nr_pinned < 0)
-		nr_pinned = 0;
-
-	return nr_pinned;
-}
-EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
-
 /**
  * pin_user_pages_remote() - pin pages of a remote process
  *
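Two behavioural notes on the hunks above. First, get_user_pages_fast_only() no longer clamps errors to 0: with the old "never returns negative" wording deleted, callers can now see -EINVAL (from the argument check) or another -errno and should treat it as "not pinned", as in the hypothetical sketch below. Second, pin_user_pages_fast_only() is deleted outright, apparently because it had no remaining in-tree users.

static bool demo_try_fast_pin(unsigned long uaddr, struct page **page)
{
	int nr = get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, page);

	/* 0 and -errno both mean the page was not pinned; on success the
	 * caller later releases the reference with put_page(). */
	return nr == 1;
}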
@@ -3125,16 +3114,14 @@ long pin_user_pages_remote(struct mm_struct *mm,
 		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas, int *locked)
 {
-	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return -EINVAL;
-
-	if (WARN_ON_ONCE(!pages))
-		return -EINVAL;
+	int local_locked = 1;
 
-	return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
-				     gup_flags | FOLL_PIN | FOLL_TOUCH |
-				     FOLL_REMOTE);
+	if (!is_valid_gup_args(pages, vmas, locked, &gup_flags,
+			       FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
+		return 0;
+	return __gup_longterm_locked(mm, start, nr_pages, pages, vmas,
+				     locked ? locked : &local_locked,
+				     gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_remote);
@@ -3159,16 +3146,12 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
-	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return -EINVAL;
-
-	if (WARN_ON_ONCE(!pages))
-		return -EINVAL;
+	int locked = 1;
 
-	gup_flags |= FOLL_PIN;
+	if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN))
+		return 0;
 	return __gup_longterm_locked(current->mm, start, nr_pages,
-				     pages, vmas, NULL, gup_flags);
+				     pages, vmas, &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
@@ -3180,14 +3163,13 @@ EXPORT_SYMBOL(pin_user_pages);
 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
-	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
-		return -EINVAL;
+	int locked = 0;
 
-	if (WARN_ON_ONCE(!pages))
-		return -EINVAL;
+	if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags,
+			       FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
+		return 0;
 
-	gup_flags |= FOLL_PIN;
-	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
+	return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL,
+				     &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_unlocked);
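Finally, note that the pin_user_pages*() wrappers above now validate through is_valid_gup_args() and return 0, rather than -EINVAL, when the arguments are rejected. A hypothetical end-to-end caller of the unlocked variant (demo_pin_buffer()) makes the pin/unpin pairing explicit; pin_user_pages_unlocked() manages mmap_lock internally, so no locking appears here:

static long demo_pin_buffer(unsigned long uaddr, unsigned long npages,
			    struct page **pages)
{
	long nr = pin_user_pages_unlocked(uaddr, npages, pages, FOLL_WRITE);

	if (nr < 0)
		return nr;
	if (nr != npages) {		/* partial pin: undo and fail */
		unpin_user_pages(pages, nr);
		return -EFAULT;
	}
	/* ... hand the pages to DMA ... */
	unpin_user_pages(pages, nr);	/* every FOLL_PIN needs an unpin */
	return 0;
}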