Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/arch/abi.h        |  4
-rw-r--r--  arch/tile/include/asm/atomic_32.h   | 37
-rw-r--r--  arch/tile/include/asm/backtrace.h   |  4
-rw-r--r--  arch/tile/include/asm/bitops.h      |  9
-rw-r--r--  arch/tile/include/asm/cache.h       |  5
-rw-r--r--  arch/tile/include/asm/highmem.h     |  2
-rw-r--r--  arch/tile/include/asm/page.h        |  6
-rw-r--r--  arch/tile/include/asm/scatterlist.h | 21
-rw-r--r--  arch/tile/include/asm/setup.h       |  8
-rw-r--r--  arch/tile/include/asm/siginfo.h     |  4
-rw-r--r--  arch/tile/include/asm/uaccess.h     |  4
-rw-r--r--  arch/tile/include/hv/hypervisor.h   |  8
12 files changed, 33 insertions, 79 deletions
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h
index da8df5b9d914..8affc76f771a 100644
--- a/arch/tile/include/arch/abi.h
+++ b/arch/tile/include/arch/abi.h
@@ -59,9 +59,7 @@
  * The ABI requires callers to allocate a caller state save area of
  * this many bytes at the bottom of each stack frame.
  */
-#ifdef __tile__
-#define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__)
-#endif
+#define C_ABI_SAVE_AREA_SIZE (2 * (CHIP_WORD_SIZE() / 8))
 
 /**
  * The operand to an 'info' opcode directing the backtracer to not
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 40a5a3a876d9..ed359aee8837 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 #define smp_mb__after_atomic_dec()	do { } while (0)
 #define smp_mb__after_atomic_inc()	do { } while (0)
 
-
-/*
- * Support "tns" atomic integers.  These are atomic integers that can
- * hold any value but "1".  They are more efficient than regular atomic
- * operations because the "lock" (aka acquire) step is a single "tns"
- * in the uncontended case, and the "unlock" (aka release) step is a
- * single "store" without an mf.  (However, note that on tilepro the
- * "tns" will evict the local cache line, so it's not all upside.)
- *
- * Note that you can ONLY observe the value stored in the pointer
- * using these operations; a direct read of the value may confusingly
- * return the special value "1".
- */
-
-int __tns_atomic_acquire(atomic_t *);
-void __tns_atomic_release(atomic_t *p, int v);
-
-static inline void tns_atomic_set(atomic_t *v, int i)
-{
-	__tns_atomic_acquire(v);
-	__tns_atomic_release(v, i);
-}
-
-static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, (ret == o) ? n : ret);
-	return ret;
-}
-
-static inline int tns_atomic_xchg(atomic_t *v, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, n);
-	return ret;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 6970bfcad549..758ca4619d50 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -21,7 +21,9 @@
 
 #include <arch/chip.h>
 
-#if CHIP_VA_WIDTH() > 32
+#if defined(__tile__)
+typedef unsigned long VirtualAddress;
+#elif CHIP_VA_WIDTH() > 32
 typedef unsigned long long VirtualAddress;
 #else
 typedef unsigned int VirtualAddress;
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 84600f3514da..6832b4be8990 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -98,26 +98,27 @@ static inline int fls64(__u64 w)
 	return (sizeof(__u64) * 8) - __builtin_clzll(w);
 }
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
 	return __builtin_popcount(w);
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
 	return __builtin_popcount(w & 0xffff);
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
 	return __builtin_popcount(w & 0xff);
 }
 
-static inline unsigned long hweight64(__u64 w)
+static inline unsigned long __arch_hweight64(__u64 w)
 {
 	return __builtin_popcountll(w);
 }
 
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index f6101840c9e7..08a2815b5e4e 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -27,11 +27,10 @@
 #define L2_CACHE_ALIGN(x)	(((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
 
 /*
- * TILE-Gx is fully coherents so we don't need to define
- * ARCH_KMALLOC_MINALIGN.
+ * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
  */
 #ifndef __tilegx__
-#define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
 #endif
 
 /* use the cache line size for the L2, which is where it counts */
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index efdd12e91020..d155db6fa9bd 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,7 +60,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic(void *kvaddr, enum km_type type);
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index f894a9016da6..7d90641cf18d 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)
 
 #endif
 
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
 
 #endif /* __KERNEL__ */
 
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h
index c5604242c0d5..35d786fe93ae 100644
--- a/arch/tile/include/asm/scatterlist.h
+++ b/arch/tile/include/asm/scatterlist.h
@@ -1,22 +1 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- *   This program is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License
- *   as published by the Free Software Foundation, version 2.
- *
- *   This program is distributed in the hope that it will be useful, but
- *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *   NON INFRINGEMENT. See the GNU General Public License for
- *   more details.
- */
-
-#ifndef _ASM_TILE_SCATTERLIST_H
-#define _ASM_TILE_SCATTERLIST_H
-
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_TILE_SCATTERLIST_H */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index 823ddd47ff6e..7caf0f36b030 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -15,6 +15,10 @@
 #ifndef _ASM_TILE_SETUP_H
 #define _ASM_TILE_SETUP_H
 
+#define COMMAND_LINE_SIZE	2048
+
+#ifdef __KERNEL__
+
 #include <linux/pfn.h>
 #include <linux/init.h>
 
@@ -23,10 +27,10 @@
  */
 #define MAXMEM_PFN	PFN_DOWN(MAXMEM)
 
-#define COMMAND_LINE_SIZE	2048
-
 void early_panic(const char *fmt, ...);
 void warn_early_printk(void);
 void __init disable_early_printk(void);
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h
index 0c12d1b9ddf2..56d661bb010b 100644
--- a/arch/tile/include/asm/siginfo.h
+++ b/arch/tile/include/asm/siginfo.h
@@ -17,6 +17,10 @@
 
 #define __ARCH_SI_TRAPNO
 
+#ifdef __LP64__
+# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+#endif
+
 #include <asm-generic/siginfo.h>
 
 /*
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ed17a80ec0ed..ef34d2caa5b1 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-extern unsigned long __copy_in_user_asm(
+extern unsigned long __copy_in_user_inatomic(
 	void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
-	return __copy_in_user_asm(to, from, n);
+	return __copy_in_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __must_check
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 59b46dc53994..9bd303a141b2 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -532,11 +532,11 @@ void hv_disable_intr(HV_IntrMask disab_mask);
  */
 void hv_clear_intr(HV_IntrMask clear_mask);
 
-/** Assert a set of device interrupts.
+/** Raise a set of device interrupts.
  *
- * @param assert_mask Bitmap of interrupts to clear.
+ * @param raise_mask Bitmap of interrupts to raise.
  */
-void hv_assert_intr(HV_IntrMask assert_mask);
+void hv_raise_intr(HV_IntrMask raise_mask);
 
 /** Trigger a one-shot interrupt on some tile
  *
@@ -1712,7 +1712,7 @@ typedef struct
  * @param cache_control This argument allows you to specify a length of
  *        physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
  *        You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
- *        You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache.
+ *        You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
  *        HV_FLUSH_ALL flushes all caches.
  * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
  *        tile indices to perform cache flush on.  The low bit of the first
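
Note on the get_order() added to asm/page.h: it replaces the generic loop from <asm-generic/getorder.h> with a count-leading-zeros computation, where the order is the smallest n for which (PAGE_SIZE << n) covers size. The standalone C sketch below illustrates that formula outside the kernel; the PAGE_SHIFT value, the guard for sizes of at most one page (where (size - 1) >> PAGE_SHIFT is 0 and plain C leaves __builtin_clzl(0) undefined, something the in-kernel version presumably relies on the tile clz semantics to tolerate), and the reference loop are all assumptions added for the demo, not part of the patch.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12				/* demo assumption, not from the patch */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

/* clz-based order, shaped like the new page.h helper, plus a guard for
 * sizes that fit in a single page. */
static int get_order_clz(unsigned long size)
{
	if (size <= PAGE_SIZE)
		return 0;
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

/* Reference definition: smallest n with (PAGE_SIZE << n) >= size. */
static int get_order_loop(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = {
		1, PAGE_SIZE, PAGE_SIZE + 1, 4 * PAGE_SIZE, 4 * PAGE_SIZE + 1
	};
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		assert(get_order_clz(sizes[i]) == get_order_loop(sizes[i]));
		printf("size %lu -> order %d\n", sizes[i],
		       get_order_clz(sizes[i]));
	}
	return 0;
}

The interesting boundary is size == PAGE_SIZE + 1, where the shifted value first becomes nonzero and the order jumps from 0 to 1.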
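
Note on the asm/bitops.h hunk: it is the arch half of the generic hweight rework. The helpers become __arch_hweight*(), and the newly included <asm-generic/bitops/const_hweight.h> supplies the hweight*() entry points, folding constant arguments at compile time and calling the arch routine only for runtime values. Below is a rough userspace sketch of that dispatch; the macros are simplified stand-ins for illustration, not the kernel's exact definitions.

#include <stdio.h>

/* Arch-specific helper, same shape as the renamed one in bitops.h. */
static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __builtin_popcount(w);
}

/* Constant-expression fallback built from plain arithmetic, so the
 * compiler can evaluate it when w is a constant. */
#define __const_hweight8(w) \
	((unsigned int)(!!((w) & 1) + !!((w) & 2) + !!((w) & 4) + \
			!!((w) & 8) + !!((w) & 16) + !!((w) & 32) + \
			!!((w) & 64) + !!((w) & 128)))
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))

/* Generic entry point: constants are folded at compile time, runtime
 * values go through the arch routine. */
#define hweight32(w) \
	(__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))

int main(void)
{
	unsigned int runtime = 0xf0f0f0f0u;

	/* Prints "8 16"; the first call is resolved by the compiler. */
	printf("%u %u\n", hweight32(0x000000ffu), hweight32(runtime));
	return 0;
}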