-rw-r--r--   arch/x86/include/asm/uaccess_32.h            36
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_execbuffer.c    4
2 files changed, 1 insertion(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 3fe0eac59462..537cc883ea29 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,46 +33,10 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
  */
-
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__put_user_size(*(u8 *)from, (u8 __user *)to,
-					1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__put_user_size(*(u16 *)from, (u16 __user *)to,
-					2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__put_user_size(*(u32 *)from, (u32 __user *)to,
-					4, ret, 4);
-			__uaccess_end();
-			return ret;
-		case 8:
-			__uaccess_begin();
-			__put_user_size(*(u64 *)from, (u64 __user *)to,
-					8, ret, 8);
-			__uaccess_end();
-			return ret;
-		}
-	}
 	return __copy_to_user_ll(to, from, n);
 }
 
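Note: the removed fast path special-cased constant sizes 1/2/4/8 via __put_user_size(); after this hunk __copy_to_user_inatomic() always falls through to __copy_to_user_ll(). As a minimal sketch (not part of the patch; the helper name and u32 field below are hypothetical), a caller that only needs a single constant-size store can use __put_user() directly, which already handles the 1/2/4/8-byte cases:

#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical caller, for illustration only: stores one 32-bit value to a
 * user pointer the caller has already validated with access_ok().
 * __put_user() expands to a size-specific store and returns 0 on success
 * or -EFAULT if the store faults, so no open-coded switch is needed.
 */
static inline int write_status_to_user(u32 __user *uptr, u32 status)
{
	return __put_user(status, uptr);
}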
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a676eedb441b..d4d7c88ab595 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -535,9 +535,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 			return ret;
 
 		if (r->presumed_offset != offset &&
-		    __copy_to_user_inatomic(&user_relocs->presumed_offset,
-					    &r->presumed_offset,
-					    sizeof(r->presumed_offset))) {
+		    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
 			return -EFAULT;
 		}
 
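The two helpers report failure differently, which is why the surrounding check is unchanged: __copy_to_user_inatomic() returns the number of bytes it could not copy, while __put_user() returns 0 or -EFAULT, so a non-zero return means the write-back failed in both cases. A hedged sketch of the two conventions (names below are made up, not taken from the i915 driver):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustration only: the pre-patch write-back pattern for one u64 field. */
static inline int writeback_old(u64 __user *dst, const u64 *src)
{
	/* Non-zero return = number of bytes NOT copied, i.e. a fault. */
	if (__copy_to_user_inatomic(dst, src, sizeof(*src)))
		return -EFAULT;
	return 0;
}

/* Illustration only: the equivalent single-value store after this patch. */
static inline int writeback_new(u64 __user *dst, u64 val)
{
	/* __put_user() already returns 0 on success or -EFAULT on a fault. */
	return __put_user(val, dst);
}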