author     J. Bruce Fields <bfields@redhat.com>  2010-09-19 23:48:00 -0400
committer  J. Bruce Fields <bfields@redhat.com>  2010-09-19 23:48:32 -0400
commit     c88739b373e4930ed082a607cb78bf82616fd076 (patch)
tree       251d969aa816dab5a62e6b36208b189b37de6534 /include/linux
parent     3211af1119174fbe8b676422b74870cdd51d7314 (diff)
parent     827e3457022d0bb0b1bb8a0eb88501876fe7dcf0 (diff)
Merge remote branch 'trond/bugfixes' into for-2.6.37
Without some client-side fixes, server testing is currently difficult.
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h          |  4
-rw-r--r--  include/linux/cgroup.h        | 12
-rw-r--r--  include/linux/elevator.h      |  1
-rw-r--r--  include/linux/i2c/sx150x.h    |  4
-rw-r--r--  include/linux/intel-gtt.h     | 20
-rw-r--r--  include/linux/io-mapping.h    | 24
-rw-r--r--  include/linux/kfifo.h         | 58
-rw-r--r--  include/linux/ksm.h           | 20
-rw-r--r--  include/linux/lglock.h        |  4
-rw-r--r--  include/linux/libata.h        |  4
-rw-r--r--  include/linux/mm.h            |  6
-rw-r--r--  include/linux/mmc/sdio.h      |  2
-rw-r--r--  include/linux/mmzone.h        | 13
-rw-r--r--  include/linux/mutex.h         |  8
-rw-r--r--  include/linux/pci.h           |  3
-rw-r--r--  include/linux/pci_ids.h       |  2
-rw-r--r--  include/linux/percpu.h        |  2
-rw-r--r--  include/linux/semaphore.h     |  3
-rw-r--r--  include/linux/serial.h        |  3
-rw-r--r--  include/linux/serial_core.h   |  3
-rw-r--r--  include/linux/sunrpc/clnt.h   |  2
-rw-r--r--  include/linux/swap.h          | 11
-rw-r--r--  include/linux/vmstat.h        | 22
-rw-r--r--  include/linux/workqueue.h     | 18
24 files changed, 171 insertions, 78 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ccf94dc5acdf..c227757feb06 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+ u32 *mask, u32 req);
extern void acpi_early_init(void);
#else /* !CONFIG_ACPI */
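The _OSC interface change replaces the fire-and-forget flags call with a request/grant handshake: callers pass the control bits they require in the last argument and read back what firmware actually granted through the mask pointer. A minimal caller sketch, assuming a host-bridge ACPI handle obtained elsewhere:

#include <linux/acpi.h>

/* Sketch: request PCIe capability-structure control and observe what
 * firmware grants. 'handle' is assumed to come from the caller's own
 * host-bridge lookup; error handling is illustrative. */
static int request_osc_control(acpi_handle handle)
{
	u32 mask = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;
	acpi_status status;

	status = acpi_pci_osc_control_set(handle, &mask,
				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* 'mask' now holds the control bits the firmware granted. */
	return 0;
}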
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e41c6e..0c991023ee47 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+ return cgroup_attach_task_all(current, tsk);
+}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
}
/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t)
+{
+ return 0;
+}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
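cgroup_attach_task_all() generalizes the old helper from "attach to current's cgroups" to "attach to any task's cgroups", and the old name becomes a trivial wrapper. A hedged sketch of the kind of caller this enables; the kthread and its name are illustrative:

#include <linux/cgroup.h>
#include <linux/kthread.h>

/* Sketch: make a worker kthread account against the cgroups of the
 * task that created it. 'worker_fn' is a placeholder thread function. */
static struct task_struct *spawn_accounted_worker(int (*worker_fn)(void *))
{
	struct task_struct *worker;

	worker = kthread_create(worker_fn, NULL, "accounted-worker");
	if (!IS_ERR(worker))
		cgroup_attach_task_all(current, worker); /* inherit cgroups */
	return worker;
}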
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c958f4fce1e..926b50322a46 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
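The newly exported elevator_change() lets a driver switch a queue's I/O scheduler by name instead of going through the sysfs store path. A sketch, assuming the queue pointer comes from the driver's own setup code:

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Sketch: a driver forcing the noop scheduler on its own queue. */
static void use_noop_elevator(struct request_queue *q)
{
	if (elevator_change(q, "noop"))
		pr_warn("could not switch to the noop elevator\n");
}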
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049cb9ba5..52baa79d69a7 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
* IRQ lines will appear. Similarly to gpio_base, the expander
* will create a block of irqs beginning at this number.
* This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ * reset of the chip at the beginning of the probe
+ * in order to place it in a known state.
*/
struct sx150x_platform_data {
unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
u16 io_polarity;
int irq_summary;
unsigned irq_base;
+ bool reset_during_probe;
};
#endif /* __LINUX_I2C_SX150X_H */
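Opting into the new behavior is just a platform-data field. A board-file sketch; every value other than reset_during_probe is a placeholder:

#include <linux/i2c/sx150x.h>

/* Sketch: platform data asking the driver to hard-reset the expander
 * before configuring it. gpio_base and irq_summary are placeholders. */
static struct sx150x_platform_data board_sx150x_pdata = {
	.gpio_base          = 200,
	.irq_summary        = -1,    /* no summary IRQ wired up */
	.reset_during_probe = true,  /* start from a known chip state */
};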
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644
index 000000000000..1d19ab2afa39
--- /dev/null
+++ b/include/linux/intel-gtt.h
@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5c490c..7fb592793738 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
iounmap_atomic(vaddr, slot);
}
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
iounmap(vaddr);
}
@@ -125,38 +125,38 @@ struct io_mapping;
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
- return (struct io_mapping *) ioremap_wc(base, size);
+ return (struct io_mapping __force *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
- iounmap(mapping);
+ iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
}
/* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
}
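With the __iomem annotations in place, sparse will flag direct dereferences of these mappings, so callers should keep the pointer qualified and go through the I/O accessors. A sketch of an atomic write-combined store; the KM_USER0 slot and the page-offset split are illustrative of this kernel's kmap-slot API:

#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/io-mapping.h>

/* Sketch: write one 32-bit value through an atomic WC mapping. The
 * mapping covers one page, so map the page and add the in-page offset. */
static void write_wc_dword(struct io_mapping *map, unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	vaddr = io_mapping_map_atomic_wc(map, offset & PAGE_MASK, KM_USER0);
	iowrite32(val, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr, KM_USER0);
}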
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f203f3e..62dbee554f60 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_reset(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.in = __tmp->kfifo.out = 0; \
})
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_reset_out(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.out = __tmp->kfifo.in; \
})
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_len(fifo) \
({ \
- typeof(fifo + 1) __tmpl = (fifo); \
+ typeof((fifo) + 1) __tmpl = (fifo); \
__tmpl->kfifo.in - __tmpl->kfifo.out; \
})
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_is_empty(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
__tmpq->kfifo.in == __tmpq->kfifo.out; \
})
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
*/
#define kfifo_is_full(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
})
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
#define kfifo_avail(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
const size_t __recsize = sizeof(*__tmpq->rectype); \
unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
(__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_skip(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
#define kfifo_peek_len(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
#define kfifo_alloc(fifo, size, gfp_mask) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_free(fifo) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__is_kfifo_ptr(__tmp)) \
__kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_init(fifo, buffer, size) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
*/
#define kfifo_put(fifo, val) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
#define kfifo_get(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
#define kfifo_peek(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
*/
#define kfifo_in(fifo, buf, n) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
#define kfifo_out(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
#define kfifo_from_user(fifo, from, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const void __user *__from = (from); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
#define kfifo_to_user(fifo, to, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_in_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
*/
#define kfifo_dma_out_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
#define kfifo_out_peek(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
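All of the kfifo changes apply one rule of macro hygiene: parenthesize a parameter before using it in an expression, so that an argument containing its own operators cannot re-associate with the macro body. The classic userspace illustration of what goes wrong otherwise:

#include <stdio.h>

#define SQUARE_BAD(x)  (x * x)     /* parameter used unparenthesized */
#define SQUARE_GOOD(x) ((x) * (x)) /* parenthesized, as the kfifo fix does */

int main(void)
{
	printf("%d\n", SQUARE_BAD(1 + 2));  /* 1 + 2*1 + 2 = 5, not 9 */
	printf("%d\n", SQUARE_GOOD(1 + 2)); /* (1 + 2) * (1 + 2) = 9 */
	return 0;
}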
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691ee9121..3319a6967626 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
struct stable_node;
struct mem_cgroup;
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_does_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = page_anon_vma(page);
- if (!anon_vma ||
- (anon_vma->root == vma->anon_vma->root &&
- page->index == linear_page_index(vma, address)))
- return page;
-
- return ksm_does_need_to_copy(page, vma, address);
+ return anon_vma &&
+ (anon_vma->root != vma->anon_vma->root ||
+ page->index != linear_page_index(vma, address));
}
int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- return page;
+ return 0;
}
static inline int page_referenced_ksm(struct page *page,
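ksm_might_need_to_copy() now merely answers yes or no, and the declaration of ksm_does_need_to_copy() moves above the CONFIG_KSM block so callers can name it unconditionally. A sketch of the resulting caller pattern, roughly what a swap-in path would do; locking and error handling omitted:

#include <linux/ksm.h>
#include <linux/mm.h>

/* Sketch: replace a swapped-in page with a fresh copy when KSM says
 * the mapping may no longer match this vma/address. */
static struct page *maybe_copy_ksm_page(struct page *page,
					struct vm_area_struct *vma,
					unsigned long address)
{
	if (ksm_might_need_to_copy(page, vma, address))
		page = ksm_does_need_to_copy(page, vma, address);
	return page;
}

With CONFIG_KSM disabled the inline stub returns 0, so the copy branch compiles but is never taken, which is why the declaration can live outside the ifdef.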
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb713b90..f549056fb20b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
int i; \
preempt_disable(); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
void name##_global_unlock(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18a0f86..45fb2967b66d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@ enum {
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */
u8 last_ctl; /* Cache last written value */
+ struct ata_link* sff_pio_task_link; /* link currently used */
struct delayed_work sff_pio_task;
#ifdef CONFIG_ATA_BMDMA
struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6b1210772ce..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
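vma_stack_continue() names the test the stack guard-page logic needs: the neighbouring VMA ends exactly at this address and also grows down. A sketch of how a fault path might use it, assuming mm's vm_prev linkage and a page-aligned addr:

#include <linux/mm.h>

/* Sketch: the first page of a grows-down stack VMA acts as a guard
 * page unless the VMA immediately below continues the same stack. */
static int is_stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
	       addr == vma->vm_start &&
	       !vma_stack_continue(vma->vm_prev, addr);
}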
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8faa6e37..245cdacee544 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
* [8:0] Byte/block count
*/
+#define R4_MEMORY_PRESENT (1 << 27)
+
/*
SDIO status in R5
Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e62648a4d..3984c4eb41fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@ struct zone {
unsigned long watermark[NR_WMARK];
/*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages to avoid per-cpu counter
+ * drift allowing watermarks to be breached
+ */
+ unsigned long percpu_drift_mark;
+
+ /*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4f5fcc..f363bc8fdc74 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
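The new kernel-doc documents behavior that has not changed. For reference, the pattern it describes, as a sketch:

#include <linux/mutex.h>

static struct mutex demo_lock;

static int __init demo_init(void)
{
	mutex_init(&demo_lock);   /* initialize to unlocked, exactly once */

	mutex_lock(&demo_lock);
	/* ... critical section ... */
	mutex_unlock(&demo_lock);
	return 0;
}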
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b1d17956a153..c8d95e369ff4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
unsigned int devfn)
{ return NULL; }
+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
#define dev_num_vf(d) (0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f6a3b2d36cad..10d33309e9a6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2300,6 +2300,8 @@
#define PCI_DEVICE_ID_P2010 0x0079
#define PCI_DEVICE_ID_P1020E 0x0100
#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1021E 0x0102
+#define PCI_DEVICE_ID_P1021 0x0103
#define PCI_DEVICE_ID_P1011E 0x0108
#define PCI_DEVICE_ID_P1011 0x0109
#define PCI_DEVICE_ID_P1022E 0x0110
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index b8b9084527b1..49466b13c5c6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
#else /* CONFIG_SMP */
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
/* can't distinguish from other static vars, always false */
static inline bool is_kernel_percpu_address(unsigned long addr)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839ac890..5310d27abd2a 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
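DEFINE_SEMAPHORE(name) is the replacement spelling for DECLARE_MUTEX: both produce a semaphore with an initial count of 1, i.e. a binary semaphore. A usage sketch:

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(demo_sem);   /* count = 1: binary semaphore */

static void demo(void)
{
	if (down_interruptible(&demo_sem))
		return;              /* interrupted while waiting */
	/* ... exclusive section ... */
	up(&demo_sem);
}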
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 1ebc694a6d52..ef914061511e 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -77,8 +77,7 @@ struct serial_struct {
#define PORT_16654 11
#define PORT_16850 12
#define PORT_RSA 13 /* RSA-DV II/S card */
-#define PORT_U6_16550A 14
-#define PORT_MAX 14
+#define PORT_MAX 13
#define SERIAL_IO_PORT 0
#define SERIAL_IO_HUB6 1
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 64458a9a8938..563e23400913 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -44,7 +44,8 @@
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250 18 /* max port ID */
+#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250 19 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 569dc722a600..85f38a63f098 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,7 @@ struct rpc_inode;
* The high-level client handle
*/
struct rpc_clnt {
- struct kref cl_kref; /* Number of references */
+ atomic_t cl_count; /* Number of references */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
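Dropping struct kref for a bare atomic_t means the client code open-codes its reference counting. A sketch of the resulting get/put pair; rpc_free_client() stands in for whatever release path the client code really uses:

#include <linux/sunrpc/clnt.h>

/* Sketch: open-coded refcounting replacing kref_get()/kref_put(). */
static void demo_clnt_get(struct rpc_clnt *clnt)
{
	atomic_inc(&clnt->cl_count);
}

static void demo_clnt_put(struct rpc_clnt *clnt)
{
	if (atomic_dec_and_test(&clnt->cl_count))
		rpc_free_client(clnt);   /* illustrative release function */
}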
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a11b73..7cdd63366f88 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@ struct bio;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
+#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
static inline int current_is_kswapd(void)
{
@@ -142,7 +143,7 @@ struct swap_extent {
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
- SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
+ SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccdc1d38..eaaea37b3b75 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
return x;
}
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
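zone_page_state_snapshot() is the expensive, drift-resistant counterpart of zone_page_state(), and it pairs with percpu_drift_mark and zone_nr_free_pages() from the mmzone.h hunk above. A plausible sketch of how zone_nr_free_pages() ties the two together; the real body lives in mm, not in this header:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Sketch: trust the cheap counter normally; fall back to the snapshot
 * only once the zone is close enough to its watermarks that per-cpu
 * drift could cause a false "above watermark" reading. */
unsigned long zone_nr_free_pages(struct zone *zone)
{
	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

	if (nr_free_pages < zone->percpu_drift_mark)
		nr_free_pages = zone_page_state_snapshot(zone, NR_FREE_PAGES);

	return nr_free_pages;
}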
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277bcd9a..f11100f96482 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
- WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */
- WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */
+ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
+ WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
+ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
- WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */
- WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+ WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
+ WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
#else
- WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */
+ WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
#endif
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
+ WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
/*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned
- * off. This makes cwqs aligned to 128 bytes which isn't too
- * excessive while allowing 15 workqueue flush colors.
+ * off. This makes cwqs aligned to 256 bytes and allows 15
+ * workqueue flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
+ WQ_DYING = 1 << 6, /* internal: workqueue is dying */
+
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
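The renumbering works because work_struct packs these flags into the low bits of an aligned cwq pointer; adding WORK_STRUCT_DELAYED widens the flag area by one bit and therefore doubles the required cwq alignment to 256 bytes. A small userspace demonstration of the pointer-tagging idea, not kernel code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_BITS 8   /* WORK_STRUCT_FLAG_BITS without debugobjects */
#define FLAG_MASK ((uintptr_t)(1 << FLAG_BITS) - 1)

int main(void)
{
	/* An object aligned to 1 << FLAG_BITS leaves its low bits free. */
	_Alignas(1 << FLAG_BITS) static char cwq[256];
	uintptr_t data = (uintptr_t)cwq | 0x5;  /* pack flags into low bits */

	assert(((uintptr_t)cwq & FLAG_MASK) == 0);
	printf("flags=%#lx ptr ok=%d\n",
	       (unsigned long)(data & FLAG_MASK),
	       (void *)(data & ~FLAG_MASK) == (void *)cwq);
	return 0;
}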