author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 18:19:10 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 18:19:10 -0700
commit    9723d95d1076e9ef394ff26162fb0b47531089b0 (patch)
tree      242504391bf7f373cd5198d32df29868ea2996e2
parent    4a4f8fdba6f5a34ca90f426021e17491a30202da (diff)
parent    7049e6800f40046c384c522a990669024d5f5836 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
-rw-r--r--  arch/sparc64/solaris/socket.c     6
-rw-r--r--  include/asm-sparc64/processor.h  34
2 files changed, 38 insertions, 2 deletions
diff --git a/arch/sparc64/solaris/socket.c b/arch/sparc64/solaris/socket.c
index ec8e074c4eac..06740582717e 100644
--- a/arch/sparc64/solaris/socket.c
+++ b/arch/sparc64/solaris/socket.c
@@ -317,8 +317,10 @@ asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsi
 		unsigned long *kcmsg;
 		compat_size_t cmlen;
 
-		if(kern_msg.msg_controllen > sizeof(ctl) &&
-		   kern_msg.msg_controllen <= 256) {
+		if (kern_msg.msg_controllen <= sizeof(compat_size_t))
+			return -EINVAL;
+
+		if(kern_msg.msg_controllen > sizeof(ctl)) {
 			err = -ENOBUFS;
 			ctl_buf = kmalloc(kern_msg.msg_controllen, GFP_KERNEL);
 			if(!ctl_buf)
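
This hunk closes a hole in the control-buffer sizing logic: the old test only
heap-allocated when msg_controllen fell in (sizeof(ctl), 256], so larger values
fell through with ctl_buf still pointing at the small on-stack buffer while the
copy below used the caller-supplied length. The new code rejects lengths too
small to hold even a cmsg length field, and heap-allocates for anything bigger
than the stack buffer. Below is a minimal user-space sketch of the corrected
sizing rule; the names (pick_ctl_buf, CTL_ON_STACK) are illustrative, not
kernel identifiers, and sizeof(unsigned int) stands in for sizeof(compat_size_t):

#include <stdio.h>
#include <stdlib.h>

#define CTL_ON_STACK 64		/* plays the role of sizeof(ctl) */

static void *pick_ctl_buf(void *stack_buf, size_t stack_len, size_t controllen)
{
	/* Too short to hold even a cmsg length field: reject. */
	if (controllen <= sizeof(unsigned int))
		return NULL;
	/* Anything larger than the stack buffer must come from the heap.
	 * The old "&& controllen <= 256" clause skipped this branch for
	 * large values, leaving the caller writing into the small stack
	 * buffer with a caller-controlled length. */
	if (controllen > stack_len)
		return malloc(controllen);
	return stack_buf;
}

int main(void)
{
	char ctl[CTL_ON_STACK];
	size_t lens[] = { 2, 32, 300 };

	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		void *buf = pick_ctl_buf(ctl, sizeof(ctl), lens[i]);

		printf("controllen=%zu -> %s\n", lens[i],
		       buf == NULL ? "rejected (-EINVAL)" :
		       buf == (void *)ctl ? "stack buffer" : "heap buffer");
		if (buf && buf != (void *)ctl)
			free(buf);
	}
	return 0;
}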
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index bc1445b904ef..d0bee2413560 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define cpu_relax()	barrier()
 
+/* Prefetch support. This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+	/* We do not use the read prefetch mnemonic because that
+	 * prefetches into the prefetch-cache, which is only accessible
+	 * by floating point operations in UltraSPARC-III and later.
+	 * By contrast, "#one_write" prefetches into the L2 cache
+	 * in shared state.
+	 */
+	__asm__ __volatile__("prefetch [%0], #one_write"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+	/* The best prefetch to use for writes is "#n_writes".
+	 * This brings the cacheline into the L2 cache in
+	 * "owned" state.
+	 */
+	__asm__ __volatile__("prefetch [%0], #n_writes"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
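
For illustration, here is how these helpers are typically consumed; the struct
and functions below are hypothetical, not part of this patch, and assume the
prefetch()/prefetchw() definitions above (normally reached via
linux/prefetch.h). prefetch() pulls the next node's line in shared state while
the current node is processed; prefetchw() requests it in owned state when the
walk will store, avoiding a second coherence transaction at the write; and
spin_lock_prefetch() does the same for a lock word that is about to be taken
and dirtied.

/* Hypothetical consumers of prefetch()/prefetchw(); not part of this
 * patch.  Prefetch instructions do not fault, so issuing one on the
 * NULL next pointer of the last node is harmless. */
#include <linux/prefetch.h>

struct node {
	struct node	*next;
	unsigned long	payload;
};

/* Read-only walk: prefetch() brings the next line in shared state. */
static unsigned long sum_list(struct node *n)
{
	unsigned long sum = 0;

	while (n) {
		prefetch(n->next);
		sum += n->payload;
		n = n->next;
	}
	return sum;
}

/* Read-modify-write walk: prefetchw() brings the next line in owned
 * state, so the store below does not have to upgrade it later. */
static void bump_list(struct node *n)
{
	while (n) {
		prefetchw(n->next);
		n->payload++;
		n = n->next;
	}
}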