Diffstat (limited to 'include')
363 files changed, 6999 insertions, 3603 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index c728113374f5..f97804bdf1ff 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -59,6 +59,10 @@ #define METHOD_NAME__PRS "_PRS" #define METHOD_NAME__PRT "_PRT" #define METHOD_NAME__PRW "_PRW" +#define METHOD_NAME__PS0 "_PS0" +#define METHOD_NAME__PS1 "_PS1" +#define METHOD_NAME__PS2 "_PS2" +#define METHOD_NAME__PS3 "_PS3" #define METHOD_NAME__REG "_REG" #define METHOD_NAME__SB_ "_SB_" #define METHOD_NAME__SEG "_SEG" diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c1c9de19edbe..57ee0528aacb 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -118,6 +118,7 @@ struct acpi_device; struct acpi_hotplug_profile { struct kobject kobj; int (*scan_dependent)(struct acpi_device *adev); + void (*notify_online)(struct acpi_device *adev); bool enabled:1; bool demand_offline:1; }; @@ -204,10 +205,9 @@ struct acpi_device_flags { u32 match_driver:1; u32 initialized:1; u32 visited:1; - u32 no_hotplug:1; u32 hotplug_notify:1; u32 is_dock_station:1; - u32 reserved:22; + u32 reserved:23; }; /* File System */ @@ -411,7 +411,6 @@ void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); int acpi_bus_attach_private_data(acpi_handle, void *); void acpi_bus_detach_private_data(acpi_handle); -void acpi_bus_no_hotplug(acpi_handle handle); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index b7c89d47efbe..9fc1d71c82bc 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20140724 +#define ACPI_CA_VERSION 0x20140828 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status *event_status)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_gpe_device(u32 gpe_index, diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 7626bfeac2cb..29e79370641d 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -952,7 +952,8 @@ enum acpi_srat_type { ACPI_SRAT_TYPE_CPU_AFFINITY = 0, ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, - ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ + ACPI_SRAT_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */ }; /* @@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity { u32 flags; u8 local_sapic_eid; u8 proximity_domain_hi[3]; - u32 reserved; /* Reserved, must be zero */ + u32 clock_domain; }; /* Flags */ @@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity { #define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ +/* 3: GICC Affinity (ACPI 5.1) */ + +struct acpi_srat_gicc_affinity { + struct acpi_subtable_header header; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +}; + +/* Flags for struct acpi_srat_gicc_affinity */ + +#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */ + /* Reset to default packing */ #pragma pack() diff --git a/include/acpi/actbl3.h 
b/include/acpi/actbl3.h index 787bcc814463..5480cb2236bf 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry { u32 common_flags; }; +/* Flag Definitions: timer_flags and virtual_timer_flags above */ + +#define ACPI_GTDT_GT_IRQ_MODE (1) +#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1) + /* Flag Definitions: common_flags above */ -#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) -#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) +#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) +#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) /* 1: SBSA Generic Watchdog Structure */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 9c79e7603459..1973ad2b13f4 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -18,14 +18,100 @@ #include <asm/cmpxchg.h> #include <asm/barrier.h> +/* + * atomic_$op() - $op integer to atomic variable + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use + * smp_mb__{before,after}_atomic(). + */ + +/* + * atomic_$op_return() - $op interer to atomic variable and returns the result + * @i: integer value to $op + * @v: pointer to the atomic variable + * + * Atomically $ops @i to @v. Does imply a full memory barrier. + */ + #ifdef CONFIG_SMP -/* Force people to define core atomics */ -# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \ - !defined(atomic_clear_mask) || !defined(atomic_set_mask) -# error "SMP requires a little arch-specific magic" -# endif + +/* we can build all atomic primitives from cmpxchg */ + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int c, old; \ + \ + c = v->counter; \ + while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ + c = old; \ + \ + return c c_op i; \ +} + +#else + +#include <linux/irqflags.h> + +#define ATOMIC_OP(op, c_op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + v->counter = v->counter c_op i; \ + raw_local_irq_restore(flags); \ +} + +#define ATOMIC_OP_RETURN(op, c_op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + unsigned long flags; \ + int ret; \ + \ + raw_local_irq_save(flags); \ + ret = (v->counter = v->counter c_op i); \ + raw_local_irq_restore(flags); \ + \ + return ret; \ +} + +#endif /* CONFIG_SMP */ + +#ifndef atomic_add_return +ATOMIC_OP_RETURN(add, +) +#endif + +#ifndef atomic_sub_return +ATOMIC_OP_RETURN(sub, -) +#endif + +#ifndef atomic_clear_mask +ATOMIC_OP(and, &) +#define atomic_clear_mask(i, v) atomic_and(~(i), (v)) #endif +#ifndef atomic_set_mask +#define CONFIG_ARCH_HAS_ATOMIC_OR +ATOMIC_OP(or, |) +#define atomic_set_mask(i, v) atomic_or((i), (v)) +#endif + +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. @@ -33,8 +119,6 @@ #define ATOMIC_INIT(i) { (i) } -#ifdef __KERNEL__ - /** * atomic_read - read atomic variable * @v: pointer of type atomic_t @@ -42,7 +126,7 @@ * Atomically reads the value of @v. 
*/ #ifndef atomic_read -#define atomic_read(v) (*(volatile int *)&(v)->counter) +#define atomic_read(v) ACCESS_ONCE((v)->counter) #endif /** @@ -56,52 +140,6 @@ #include <linux/irqflags.h> -/** - * atomic_add_return - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v and returns the result - */ -#ifndef atomic_add_return -static inline int atomic_add_return(int i, atomic_t *v) -{ - unsigned long flags; - int temp; - - raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ - temp = v->counter; - temp += i; - v->counter = temp; - raw_local_irq_restore(flags); - - return temp; -} -#endif - -/** - * atomic_sub_return - subtract integer from atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns the result - */ -#ifndef atomic_sub_return -static inline int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long flags; - int temp; - - raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ - temp = v->counter; - temp -= i; - v->counter = temp; - raw_local_irq_restore(flags); - - return temp; -} -#endif - static inline int atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v) static inline int __atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) - c = old; - return c; -} - -/** - * atomic_clear_mask - Atomically clear bits in atomic variable - * @mask: Mask of the bits to be cleared - * @v: pointer of type atomic_t - * - * Atomically clears the bits set in @mask from @v - */ -#ifndef atomic_clear_mask -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) -{ - unsigned long flags; - - mask = ~mask; - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ - v->counter &= mask; - raw_local_irq_restore(flags); + int c, old; + c = atomic_read(v); + while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) + c = old; + return c; } -#endif - -/** - * atomic_set_mask - Atomically set bits in atomic variable - * @mask: Mask of the bits to be set - * @v: pointer of type atomic_t - * - * Atomically sets the bits set in @mask in @v - */ -#ifndef atomic_set_mask -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ - unsigned long flags; - - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ - v->counter |= mask; - raw_local_irq_restore(flags); -} -#endif -#endif /* __KERNEL__ */ #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index b18ce4f9ee3d..30ad9c86cebb 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -20,10 +20,22 @@ typedef struct { extern long long atomic64_read(const atomic64_t *v); extern void atomic64_set(atomic64_t *v, long long i); -extern void atomic64_add(long long a, atomic64_t *v); -extern long long atomic64_add_return(long long a, atomic64_t *v); -extern void atomic64_sub(long long a, atomic64_t *v); -extern long long atomic64_sub_return(long long a, atomic64_t *v); + +#define ATOMIC64_OP(op) \ +extern void atomic64_##op(long long a, atomic64_t *v); + +#define ATOMIC64_OP_RETURN(op) \ +extern long long atomic64_##op##_return(long long a, atomic64_t *v); + +#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) + +ATOMIC64_OPS(add) +ATOMIC64_OPS(sub) + 
+#undef ATOMIC64_OPS +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + extern long long atomic64_dec_if_positive(atomic64_t *v); extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); extern long long atomic64_xchg(atomic64_t *v, long long new); diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index de8bf89940f8..3378dcf4c31e 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + /** * dma_mmap_attrs - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -205,14 +214,6 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) -static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); -} - int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size); diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index c1d4105e1c1d..383ade1a211b 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -27,7 +27,7 @@ */ #ifndef ARCH_NR_GPIOS -#define ARCH_NR_GPIOS 256 +#define ARCH_NR_GPIOS 512 #endif /* diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 975e1cc75edb..b8fdc57a7335 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -331,7 +331,7 @@ static inline void iounmap(void __iomem *addr) #ifndef CONFIG_GENERIC_IOMAP static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return (void __iomem *) port; + return PCI_IOBASE + (port & IO_SPACE_LIMIT); } static inline void ioport_unmap(void __iomem *p) diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h new file mode 100644 index 000000000000..a44f452c6590 --- /dev/null +++ b/include/asm-generic/irq_work.h @@ -0,0 +1,10 @@ +#ifndef __ASM_IRQ_WORK_H +#define __ASM_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return false; +} + +#endif /* __ASM_IRQ_WORK_H */ + diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 53b2acc38213..081ff8826bf6 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -249,6 +249,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #define pgprot_writecombine pgprot_noncached #endif +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. 
Although no @@ -660,11 +664,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd) } #ifdef CONFIG_NUMA_BALANCING -#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE /* - * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the - * same bit too). It's set only when _PAGE_PRESET is not set and it's - * never set if _PAGE_PRESENT is set. + * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that + * is protected for PROT_NONE and a NUMA hinting fault entry. If the + * architecture defines __PAGE_PROTNONE then it should take that into account + * but those that do not can rely on the fact that the NUMA hinting scanner + * skips inaccessible VMAs. * * pte/pmd_present() returns true if pte/pmd_numa returns true. Page * fault triggers on those regions if pte/pmd_numa returns true @@ -673,16 +678,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd) #ifndef pte_numa static inline int pte_numa(pte_t pte) { - return (pte_flags(pte) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return ptenuma_flags(pte) == _PAGE_NUMA; } #endif #ifndef pmd_numa static inline int pmd_numa(pmd_t pmd) { - return (pmd_flags(pmd) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return pmdnuma_flags(pmd) == _PAGE_NUMA; } #endif @@ -722,6 +725,8 @@ static inline pte_t pte_mknuma(pte_t pte) { pteval_t val = pte_val(pte); + VM_BUG_ON(!(val & _PAGE_PRESENT)); + val &= ~_PAGE_PRESENT; val |= _PAGE_NUMA; @@ -765,16 +770,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, } #endif #else -extern int pte_numa(pte_t pte); -extern int pmd_numa(pmd_t pmd); -extern pte_t pte_mknonnuma(pte_t pte); -extern pmd_t pmd_mknonnuma(pmd_t pmd); -extern pte_t pte_mknuma(pte_t pte); -extern pmd_t pmd_mknuma(pmd_t pmd); -extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); -#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ -#else static inline int pmd_numa(pmd_t pmd) { return 0; diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index f1a24b5c3b90..b58fd667f87b 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -3,6 +3,8 @@ /* References to section boundaries */ +#include <linux/compiler.h> + /* * Usage guidelines: * _text, _data: architecture specific, don't use them in arch-independent code @@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; +extern __visible const void __nosave_begin, __nosave_end; + /* function descriptor handling (if any). 
Override * in asm/sections.h */ #ifndef dereference_function_descriptor diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 5ba0360663a7..aa70cbda327c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -40,6 +40,8 @@ * } * * [__init_begin, __init_end] is the init section that may be freed after init + * // __init_begin and __init_end should be page aligned, so that we can + * // free the whole .init memory * [_stext, _etext] is the text section * [_sdata, _edata] is the data section * diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 831d786976c5..5186f750c713 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -82,15 +82,6 @@ typedef uint32_t drbg_flag_t; struct drbg_core { drbg_flag_t flags; /* flags for the cipher */ __u8 statelen; /* maximum state length */ - /* - * maximum length of personalization string or additional input - * string -- exponent for base 2 - */ - __u8 max_addtllen; - /* maximum bits per RNG request -- exponent for base 2*/ - __u8 max_bits; - /* maximum number of requests -- exponent for base 2 */ - __u8 max_req; __u8 blocklen_bytes; /* block size of output in bytes */ char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ /* kernel crypto API backend cipher name */ @@ -156,18 +147,33 @@ static inline __u8 drbg_keylen(struct drbg_state *drbg) static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) { - /* max_bits is in bits, but buflen is in bytes */ - return (1 << (drbg->core->max_bits - 3)); + /* SP800-90A requires the limit 2**19 bits, but we return bytes */ + return (1 << 16); } static inline size_t drbg_max_addtl(struct drbg_state *drbg) { - return (1UL<<(drbg->core->max_addtllen)); + /* SP800-90A requires 2**35 bytes additional info str / pers str */ +#if (__BITS_PER_LONG == 32) + /* + * SP800-90A allows smaller maximum numbers to be returned -- we + * return SIZE_MAX - 1 to allow the verification of the enforcement + * of this value in drbg_healthcheck_sanity. 
+ */ + return (SIZE_MAX - 1); +#else + return (1UL<<35); +#endif } static inline size_t drbg_max_requests(struct drbg_state *drbg) { - return (1UL<<(drbg->core->max_req)); + /* SP800-90A requires 2**48 maximum requests before reseeding */ +#if (__BITS_PER_LONG == 32) + return SIZE_MAX; +#else + return (1UL<<48); +#endif } /* diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 9b6f32a6cad1..3b4af1d7c7e9 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -117,6 +117,15 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_mcryptd_update(struct ahash_request *req, + struct shash_desc *desc); +int shash_ahash_mcryptd_final(struct ahash_request *req, + struct shash_desc *desc); +int shash_ahash_mcryptd_finup(struct ahash_request *req, + struct shash_desc *desc); +int shash_ahash_mcryptd_digest(struct ahash_request *req, + struct shash_desc *desc); + int crypto_init_shash_ops_async(struct crypto_tfm *tfm); static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h new file mode 100644 index 000000000000..c23ee1f7ee80 --- /dev/null +++ b/include/crypto/mcryptd.h @@ -0,0 +1,112 @@ +/* + * Software async multibuffer crypto daemon headers + * + * Author: + * Tim Chen <tim.c.chen@linux.intel.com> + * + * Copyright (c) 2014, Intel Corporation. + */ + +#ifndef _CRYPTO_MCRYPT_H +#define _CRYPTO_MCRYPT_H + +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <crypto/hash.h> + +struct mcryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct mcryptd_ahash *__mcryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct mcryptd_ahash *)tfm; +} + +struct mcryptd_cpu_queue { + struct crypto_queue queue; + struct work_struct work; +}; + +struct mcryptd_queue { + struct mcryptd_cpu_queue __percpu *cpu_queue; +}; + +struct mcryptd_instance_ctx { + struct crypto_spawn spawn; + struct mcryptd_queue *queue; +}; + +struct mcryptd_hash_ctx { + struct crypto_shash *child; + struct mcryptd_alg_state *alg_state; +}; + +struct mcryptd_tag { + /* seq number of request */ + unsigned seq_num; + /* arrival time of request */ + unsigned long arrival; + unsigned long expire; + int cpu; +}; + +struct mcryptd_hash_request_ctx { + struct list_head waiter; + crypto_completion_t complete; + struct mcryptd_tag tag; + struct crypto_hash_walk walk; + u8 *out; + int flag; + struct shash_desc desc; +}; + +struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); +struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); +void mcryptd_free_ahash(struct mcryptd_ahash *tfm); +void mcryptd_flusher(struct work_struct *work); + +enum mcryptd_req_type { + MCRYPTD_NONE, + MCRYPTD_UPDATE, + MCRYPTD_FINUP, + MCRYPTD_DIGEST, + MCRYPTD_FINAL +}; + +struct mcryptd_alg_cstate { + unsigned long next_flush; + unsigned next_seq_num; + bool flusher_engaged; + struct delayed_work flush; + int cpu; + struct mcryptd_alg_state *alg_state; + void *mgr; + spinlock_t work_lock; + struct list_head work_list; + struct list_head flush_list; +}; + +struct mcryptd_alg_state { + struct mcryptd_alg_cstate __percpu *alg_cstate; + unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate); +}; + +/* return delay 
in jiffies from current time */ +static inline unsigned long get_delay(unsigned long t) +{ + long delay; + + delay = (long) t - (long) jiffies; + if (delay <= 0) + return 0; + else + return (unsigned long) delay; +} + +void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay); + +#endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 0d164c6af539..54add2069901 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -15,6 +15,7 @@ #define _LINUX_PUBLIC_KEY_H #include <linux/mpi.h> +#include <keys/asymmetric-type.h> #include <crypto/hash_info.h> enum pkey_algo { @@ -98,8 +99,9 @@ struct key; extern int verify_signature(const struct key *key, const struct public_key_signature *sig); +struct asymmetric_key_id; extern struct key *x509_request_asymmetric_key(struct key *keyring, - const char *issuer, - const char *key_id); + const struct asymmetric_key_id *kid, + bool partial); #endif /* _LINUX_PUBLIC_KEY_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index 654151e24288..ddaef8620b2c 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -128,7 +128,7 @@ #define IMX6Q_CLK_ECSPI5 116 #define IMX6DL_CLK_I2C4 116 #define IMX6QDL_CLK_ENET 117 -#define IMX6QDL_CLK_ESAI 118 +#define IMX6QDL_CLK_ESAI_EXTAL 118 #define IMX6QDL_CLK_GPT_IPG 119 #define IMX6QDL_CLK_GPT_IPG_PER 120 #define IMX6QDL_CLK_GPU2D_CORE 121 @@ -218,7 +218,36 @@ #define IMX6QDL_CLK_LVDS2_SEL 205 #define IMX6QDL_CLK_LVDS1_GATE 206 #define IMX6QDL_CLK_LVDS2_GATE 207 -#define IMX6QDL_CLK_ESAI_AHB 208 -#define IMX6QDL_CLK_END 209 +#define IMX6QDL_CLK_ESAI_IPG 208 +#define IMX6QDL_CLK_ESAI_MEM 209 +#define IMX6QDL_CLK_ASRC_IPG 210 +#define IMX6QDL_CLK_ASRC_MEM 211 +#define IMX6QDL_CLK_LVDS1_IN 212 +#define IMX6QDL_CLK_LVDS2_IN 213 +#define IMX6QDL_CLK_ANACLK1 214 +#define IMX6QDL_CLK_ANACLK2 215 +#define IMX6QDL_PLL1_BYPASS_SRC 216 +#define IMX6QDL_PLL2_BYPASS_SRC 217 +#define IMX6QDL_PLL3_BYPASS_SRC 218 +#define IMX6QDL_PLL4_BYPASS_SRC 219 +#define IMX6QDL_PLL5_BYPASS_SRC 220 +#define IMX6QDL_PLL6_BYPASS_SRC 221 +#define IMX6QDL_PLL7_BYPASS_SRC 222 +#define IMX6QDL_CLK_PLL1 223 +#define IMX6QDL_CLK_PLL2 224 +#define IMX6QDL_CLK_PLL3 225 +#define IMX6QDL_CLK_PLL4 226 +#define IMX6QDL_CLK_PLL5 227 +#define IMX6QDL_CLK_PLL6 228 +#define IMX6QDL_CLK_PLL7 229 +#define IMX6QDL_PLL1_BYPASS 230 +#define IMX6QDL_PLL2_BYPASS 231 +#define IMX6QDL_PLL3_BYPASS 232 +#define IMX6QDL_PLL4_BYPASS 233 +#define IMX6QDL_PLL5_BYPASS 234 +#define IMX6QDL_PLL6_BYPASS 235 +#define IMX6QDL_PLL7_BYPASS 236 +#define IMX6QDL_CLK_GPT_3M 237 +#define IMX6QDL_CLK_END 238 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h index b91dd462ba85..9ce4e421096f 100644 --- a/include/dt-bindings/clock/imx6sl-clock.h +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -146,6 +146,34 @@ #define IMX6SL_CLK_PLL4_AUDIO_DIV 133 #define IMX6SL_CLK_SPBA 134 #define IMX6SL_CLK_ENET 135 -#define IMX6SL_CLK_END 136 +#define IMX6SL_CLK_LVDS1_SEL 136 +#define IMX6SL_CLK_LVDS1_OUT 137 +#define IMX6SL_CLK_LVDS1_IN 138 +#define IMX6SL_CLK_ANACLK1 139 +#define IMX6SL_PLL1_BYPASS_SRC 140 +#define IMX6SL_PLL2_BYPASS_SRC 141 +#define IMX6SL_PLL3_BYPASS_SRC 142 +#define IMX6SL_PLL4_BYPASS_SRC 143 +#define IMX6SL_PLL5_BYPASS_SRC 144 +#define IMX6SL_PLL6_BYPASS_SRC 145 +#define IMX6SL_PLL7_BYPASS_SRC 146 +#define IMX6SL_CLK_PLL1 147 
+#define IMX6SL_CLK_PLL2 148 +#define IMX6SL_CLK_PLL3 149 +#define IMX6SL_CLK_PLL4 150 +#define IMX6SL_CLK_PLL5 151 +#define IMX6SL_CLK_PLL6 152 +#define IMX6SL_CLK_PLL7 153 +#define IMX6SL_PLL1_BYPASS 154 +#define IMX6SL_PLL2_BYPASS 155 +#define IMX6SL_PLL3_BYPASS 156 +#define IMX6SL_PLL4_BYPASS 157 +#define IMX6SL_PLL5_BYPASS 158 +#define IMX6SL_PLL6_BYPASS 159 +#define IMX6SL_PLL7_BYPASS 160 +#define IMX6SL_CLK_SSI1_IPG 161 +#define IMX6SL_CLK_SSI2_IPG 162 +#define IMX6SL_CLK_SSI3_IPG 163 +#define IMX6SL_CLK_END 164 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h index 421d8bb76f2f..995709119ec5 100644 --- a/include/dt-bindings/clock/imx6sx-clock.h +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -251,6 +251,29 @@ #define IMX6SX_CLK_SAI2_IPG 238 #define IMX6SX_CLK_ESAI_IPG 239 #define IMX6SX_CLK_ESAI_MEM 240 -#define IMX6SX_CLK_CLK_END 241 +#define IMX6SX_CLK_LVDS1_IN 241 +#define IMX6SX_CLK_ANACLK1 242 +#define IMX6SX_PLL1_BYPASS_SRC 243 +#define IMX6SX_PLL2_BYPASS_SRC 244 +#define IMX6SX_PLL3_BYPASS_SRC 245 +#define IMX6SX_PLL4_BYPASS_SRC 246 +#define IMX6SX_PLL5_BYPASS_SRC 247 +#define IMX6SX_PLL6_BYPASS_SRC 248 +#define IMX6SX_PLL7_BYPASS_SRC 249 +#define IMX6SX_CLK_PLL1 250 +#define IMX6SX_CLK_PLL2 251 +#define IMX6SX_CLK_PLL3 252 +#define IMX6SX_CLK_PLL4 253 +#define IMX6SX_CLK_PLL5 254 +#define IMX6SX_CLK_PLL6 255 +#define IMX6SX_CLK_PLL7 256 +#define IMX6SX_PLL1_BYPASS 257 +#define IMX6SX_PLL2_BYPASS 258 +#define IMX6SX_PLL3_BYPASS 259 +#define IMX6SX_PLL4_BYPASS 260 +#define IMX6SX_PLL5_BYPASS 261 +#define IMX6SX_PLL6_BYPASS 262 +#define IMX6SX_PLL7_BYPASS 263 +#define IMX6SX_CLK_CLK_END 264 #endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h new file mode 100644 index 000000000000..f6b4b0fe7a43 --- /dev/null +++ b/include/dt-bindings/clock/r8a7740-clock.h @@ -0,0 +1,77 @@ +/* + * Copyright 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7740_H__ +#define __DT_BINDINGS_CLOCK_R8A7740_H__ + +/* CPG */ +#define R8A7740_CLK_SYSTEM 0 +#define R8A7740_CLK_PLLC0 1 +#define R8A7740_CLK_PLLC1 2 +#define R8A7740_CLK_PLLC2 3 +#define R8A7740_CLK_R 4 +#define R8A7740_CLK_USB24S 5 +#define R8A7740_CLK_I 6 +#define R8A7740_CLK_ZG 7 +#define R8A7740_CLK_B 8 +#define R8A7740_CLK_M1 9 +#define R8A7740_CLK_HP 10 +#define R8A7740_CLK_HPP 11 +#define R8A7740_CLK_USBP 12 +#define R8A7740_CLK_S 13 +#define R8A7740_CLK_ZB 14 +#define R8A7740_CLK_M3 15 +#define R8A7740_CLK_CP 16 + +/* MSTP1 */ +#define R8A7740_CLK_CEU21 28 +#define R8A7740_CLK_CEU20 27 +#define R8A7740_CLK_TMU0 25 +#define R8A7740_CLK_LCDC1 17 +#define R8A7740_CLK_IIC0 16 +#define R8A7740_CLK_TMU1 11 +#define R8A7740_CLK_LCDC0 0 + +/* MSTP2 */ +#define R8A7740_CLK_SCIFA6 30 +#define R8A7740_CLK_SCIFA7 22 +#define R8A7740_CLK_DMAC1 18 +#define R8A7740_CLK_DMAC2 17 +#define R8A7740_CLK_DMAC3 16 +#define R8A7740_CLK_USBDMAC 14 +#define R8A7740_CLK_SCIFA5 7 +#define R8A7740_CLK_SCIFB 6 +#define R8A7740_CLK_SCIFA0 4 +#define R8A7740_CLK_SCIFA1 3 +#define R8A7740_CLK_SCIFA2 2 +#define R8A7740_CLK_SCIFA3 1 +#define R8A7740_CLK_SCIFA4 0 + +/* MSTP3 */ +#define R8A7740_CLK_CMT1 29 +#define R8A7740_CLK_FSI 28 +#define R8A7740_CLK_IIC1 23 +#define R8A7740_CLK_USBF 20 +#define R8A7740_CLK_SDHI0 14 +#define R8A7740_CLK_SDHI1 13 +#define R8A7740_CLK_MMC 12 +#define R8A7740_CLK_GETHER 9 +#define R8A7740_CLK_TPU0 4 + +/* MSTP4 */ +#define R8A7740_CLK_USBH 16 +#define R8A7740_CLK_SDHI2 15 +#define R8A7740_CLK_USBFUNC 7 +#define R8A7740_CLK_USBPHY 6 + +/* SUBCK* */ +#define R8A7740_CLK_SUBCK 9 +#define R8A7740_CLK_SUBCK2 10 + +#endif /* __DT_BINDINGS_CLOCK_R8A7740_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h index f929a79e6998..8ea7ab0346ad 100644 --- a/include/dt-bindings/clock/r8a7790-clock.h +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -26,6 +26,7 @@ #define R8A7790_CLK_MSIOF0 0 /* MSTP1 */ +#define R8A7790_CLK_JPU 6 #define R8A7790_CLK_TMU1 11 #define R8A7790_CLK_TMU3 21 #define R8A7790_CLK_TMU2 22 diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h index f0d4d1049162..58c3f49d068c 100644 --- a/include/dt-bindings/clock/r8a7791-clock.h +++ b/include/dt-bindings/clock/r8a7791-clock.h @@ -25,6 +25,7 @@ #define R8A7791_CLK_MSIOF0 0 /* MSTP1 */ +#define R8A7791_CLK_JPU 6 #define R8A7791_CLK_TMU1 11 #define R8A7791_CLK_TMU3 21 #define R8A7791_CLK_TMU2 22 diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h new file mode 100644 index 000000000000..9ac1043e25bc --- /dev/null +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright 2013 Ideas On Board SPRL + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__ +#define __DT_BINDINGS_CLOCK_R8A7794_H__ + +/* CPG */ +#define R8A7794_CLK_MAIN 0 +#define R8A7794_CLK_PLL0 1 +#define R8A7794_CLK_PLL1 2 +#define R8A7794_CLK_PLL3 3 +#define R8A7794_CLK_LB 4 +#define R8A7794_CLK_QSPI 5 +#define R8A7794_CLK_SDH 6 +#define R8A7794_CLK_SD0 7 +#define R8A7794_CLK_Z 8 + +/* MSTP0 */ +#define R8A7794_CLK_MSIOF0 0 + +/* MSTP1 */ +#define R8A7794_CLK_TMU1 11 +#define R8A7794_CLK_TMU3 21 +#define R8A7794_CLK_TMU2 22 +#define R8A7794_CLK_CMT0 24 +#define R8A7794_CLK_TMU0 25 + +/* MSTP2 */ +#define R8A7794_CLK_SCIFA2 2 +#define R8A7794_CLK_SCIFA1 3 +#define R8A7794_CLK_SCIFA0 4 +#define R8A7794_CLK_MSIOF2 5 +#define R8A7794_CLK_SCIFB0 6 +#define R8A7794_CLK_SCIFB1 7 +#define R8A7794_CLK_MSIOF1 8 +#define R8A7794_CLK_SCIFB2 16 + +/* MSTP3 */ +#define R8A7794_CLK_CMT1 29 + +/* MSTP5 */ +#define R8A7794_CLK_THERMAL 22 +#define R8A7794_CLK_PWM 23 + +/* MSTP7 */ +#define R8A7794_CLK_HSCIF2 13 +#define R8A7794_CLK_SCIF5 14 +#define R8A7794_CLK_SCIF4 15 +#define R8A7794_CLK_HSCIF1 16 +#define R8A7794_CLK_HSCIF0 17 +#define R8A7794_CLK_SCIF3 18 +#define R8A7794_CLK_SCIF2 19 +#define R8A7794_CLK_SCIF1 20 +#define R8A7794_CLK_SCIF0 21 + +/* MSTP8 */ +#define R8A7794_CLK_ETHER 13 + +/* MSTP9 */ +#define R8A7794_CLK_GPIO6 5 +#define R8A7794_CLK_GPIO5 7 +#define R8A7794_CLK_GPIO4 8 +#define R8A7794_CLK_GPIO3 9 +#define R8A7794_CLK_GPIO2 10 +#define R8A7794_CLK_GPIO1 11 +#define R8A7794_CLK_GPIO0 12 + +/* MSTP11 */ +#define R8A7794_CLK_SCIFA3 6 +#define R8A7794_CLK_SCIFA4 7 +#define R8A7794_CLK_SCIFA5 8 + +#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 00953d9484cb..d6b56b21539b 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -166,6 +166,9 @@ #define VF610_CLK_DMAMUX3 153 #define VF610_CLK_FLEXCAN0_EN 154 #define VF610_CLK_FLEXCAN1_EN 155 -#define VF610_CLK_END 156 +#define VF610_CLK_PLL7_MAIN 156 +#define VF610_CLK_USBPHY0 157 +#define VF610_CLK_USBPHY1 158 +#define VF610_CLK_END 159 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/input/ti-drv260x.h b/include/dt-bindings/input/ti-drv260x.h new file mode 100644 index 000000000000..2626e6d9f707 --- /dev/null +++ b/include/dt-bindings/input/ti-drv260x.h @@ -0,0 +1,36 @@ +/* + * DRV260X haptics driver family + * + * Author: Dan Murphy <dmurphy@ti.com> + * + * Copyright: (C) 2014 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_TI_DRV260X_H +#define _DT_BINDINGS_TI_DRV260X_H + +/* Calibration Types */ +#define DRV260X_LRA_MODE 0x00 +#define DRV260X_LRA_NO_CAL_MODE 0x01 +#define DRV260X_ERM_MODE 0x02 + +/* Library Selection */ +#define DRV260X_LIB_EMPTY 0x00 +#define DRV260X_ERM_LIB_A 0x01 +#define DRV260X_ERM_LIB_B 0x02 +#define DRV260X_ERM_LIB_C 0x03 +#define DRV260X_ERM_LIB_D 0x04 +#define DRV260X_ERM_LIB_E 0x05 +#define DRV260X_LIB_LRA 0x06 +#define DRV260X_ERM_LIB_F 0x07 + +#endif diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h index 0fee6ff77ffc..bbca3d038900 100644 --- a/include/dt-bindings/pinctrl/at91.h +++ b/include/dt-bindings/pinctrl/at91.h @@ -20,6 +20,11 @@ #define AT91_PINCTRL_PULL_UP_DEGLITCH (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DEGLITCH) +#define AT91_PINCTRL_DRIVE_STRENGTH_DEFAULT (0x0 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_LOW (0x1 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5) +#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5) + #define AT91_PIOA 0 #define AT91_PIOB 1 #define AT91_PIOC 2 diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h index cd5788be82ce..743e66a95e13 100644 --- a/include/dt-bindings/pinctrl/rockchip.h +++ b/include/dt-bindings/pinctrl/rockchip.h @@ -28,5 +28,7 @@ #define RK_FUNC_GPIO 0 #define RK_FUNC_1 1 #define RK_FUNC_2 2 +#define RK_FUNC_3 3 +#define RK_FUNC_4 4 #endif diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h new file mode 100644 index 000000000000..0c6d6a3c15a2 --- /dev/null +++ b/include/dt-bindings/sound/cs35l32.h @@ -0,0 +1,26 @@ +#ifndef __DT_CS35L32_H +#define __DT_CS35L32_H + +#define CS35L32_BOOST_MGR_AUTO 0 +#define CS35L32_BOOST_MGR_AUTO_AUDIO 1 +#define CS35L32_BOOST_MGR_BYPASS 2 +#define CS35L32_BOOST_MGR_FIXED 3 + +#define CS35L32_DATA_CFG_LR_VP 0 +#define CS35L32_DATA_CFG_LR_STAT 1 +#define CS35L32_DATA_CFG_LR 2 +#define CS35L32_DATA_CFG_LR_VPSTAT 3 + +#define CS35L32_BATT_THRESH_3_1V 0 +#define CS35L32_BATT_THRESH_3_2V 1 +#define CS35L32_BATT_THRESH_3_3V 2 +#define CS35L32_BATT_THRESH_3_4V 3 + +#define CS35L32_BATT_RECOV_3_1V 0 +#define CS35L32_BATT_RECOV_3_2V 1 +#define CS35L32_BATT_RECOV_3_3V 2 +#define CS35L32_BATT_RECOV_3_4V 3 +#define CS35L32_BATT_RECOV_3_5V 4 +#define CS35L32_BATT_RECOV_3_6V 5 + +#endif /* __DT_CS35L32_H */ diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 7dd473496180..c0754abb2f56 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -19,6 +19,47 @@ extern struct key_type key_type_asymmetric; /* + * Identifiers for an asymmetric key ID. We have three ways of looking up a + * key derived from an X.509 certificate: + * + * (1) Serial Number & Issuer. Non-optional. This is the only valid way to + * map a PKCS#7 signature to an X.509 certificate. + * + * (2) Issuer & Subject Unique IDs. Optional. These were the original way to + * match X.509 certificates, but have fallen into disuse in favour of (3). + * + * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on + * CA keys that are intended to sign other keys, so don't appear in end + * user certificates unless forced. + * + * We could also support an PGP key identifier, which is just a SHA1 sum of the + * public key and certain parameters, but since we don't support PGP keys at + * the moment, we shall ignore those. 
+ * + * What we actually do is provide a place where binary identifiers can be + * stashed and then compare against them when checking for an id match. + */ +struct asymmetric_key_id { + unsigned short len; + unsigned char data[]; +}; + +struct asymmetric_key_ids { + void *id[2]; +}; + +extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1, + const struct asymmetric_key_id *kid2); + +extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1, + size_t len_1, + const void *val_2, + size_t len_2); + +/* * The payload is at the discretion of the subtype. */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index 3ab1873a4bfa..cebefb069c44 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -40,7 +40,6 @@ struct key_preparsed_payload; extern int user_preparse(struct key_preparsed_payload *prep); extern void user_free_preparse(struct key_preparsed_payload *prep); extern int user_update(struct key *key, struct key_preparsed_payload *prep); -extern int user_match(const struct key *key, const void *criterion); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 35b0c121bb65..2f2aac8448a4 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -25,26 +25,25 @@ #include <linux/spinlock.h> #include <linux/types.h> -#define VGIC_NR_IRQS 256 +#define VGIC_NR_IRQS_LEGACY 256 #define VGIC_NR_SGIS 16 #define VGIC_NR_PPIS 16 #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) -#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) -#define VGIC_MAX_CPUS KVM_MAX_VCPUS #define VGIC_V2_MAX_LRS (1 << 6) #define VGIC_V3_MAX_LRS 16 +#define VGIC_MAX_IRQS 1024 /* Sanity checks... */ -#if (VGIC_MAX_CPUS > 8) +#if (KVM_MAX_VCPUS > 8) #error Invalid number of CPU interfaces #endif -#if (VGIC_NR_IRQS & 31) +#if (VGIC_NR_IRQS_LEGACY & 31) #error "VGIC_NR_IRQS must be a multiple of 32" #endif -#if (VGIC_NR_IRQS > 1024) +#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS) #error "VGIC_NR_IRQS must be <= 1024" #endif @@ -54,19 +53,33 @@ * - a bunch of shared interrupts (SPI) */ struct vgic_bitmap { - union { - u32 reg[VGIC_NR_PRIVATE_IRQS / 32]; - DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS); - } percpu[VGIC_MAX_CPUS]; - union { - u32 reg[VGIC_NR_SHARED_IRQS / 32]; - DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS); - } shared; + /* + * - One UL per VCPU for private interrupts (assumes UL is at + * least 32 bits) + * - As many UL as necessary for shared interrupts. + * + * The private interrupts are accessed via the "private" + * field, one UL per vcpu (the state for vcpu n is in + * private[n]). The shared interrupts are accessed via the + * "shared" pointer (IRQn state is at bit n-32 in the bitmap). + */ + unsigned long *private; + unsigned long *shared; }; struct vgic_bytemap { - u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4]; - u32 shared[VGIC_NR_SHARED_IRQS / 4]; + /* + * - 8 u32 per VCPU for private interrupts + * - As many u32 as necessary for shared interrupts. + * + * The private interrupts are accessed via the "private" + * field, (the state for vcpu n is in private[n*8] to + * private[n*8 + 7]). The shared interrupts are accessed via + * the "shared" pointer (IRQn state is at byte (n-32)%4 of the + * shared[(n-32)/4] word). 
+ */ + u32 *private; + u32 *shared; }; struct kvm_vcpu; @@ -127,6 +140,9 @@ struct vgic_dist { bool in_kernel; bool ready; + int nr_cpus; + int nr_irqs; + /* Virtual control interface mapping */ void __iomem *vctrl_base; @@ -140,11 +156,25 @@ struct vgic_dist { /* Interrupt enabled (one bit per IRQ) */ struct vgic_bitmap irq_enabled; - /* Interrupt 'pin' level */ - struct vgic_bitmap irq_state; + /* Level-triggered interrupt external input is asserted */ + struct vgic_bitmap irq_level; - /* Level-triggered interrupt in progress */ - struct vgic_bitmap irq_active; + /* + * Interrupt state is pending on the distributor + */ + struct vgic_bitmap irq_pending; + + /* + * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered + * interrupts. Essentially holds the state of the flip-flop in + * Figure 4-10 on page 4-101 in ARM IHI 0048B.b. + * Once set, it is only cleared for level-triggered interrupts on + * guest ACKs (when we queue it) or writes to GICD_ICPENDRn. + */ + struct vgic_bitmap irq_soft_pend; + + /* Level-triggered interrupt queued on VCPU interface */ + struct vgic_bitmap irq_queued; /* Interrupt priority. Not used yet. */ struct vgic_bytemap irq_priority; @@ -152,15 +182,36 @@ struct vgic_dist { /* Level/edge triggered */ struct vgic_bitmap irq_cfg; - /* Source CPU per SGI and target CPU */ - u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS]; - - /* Target CPU for each IRQ */ - u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS]; - struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS]; + /* + * Source CPU per SGI and target CPU: + * + * Each byte represent a SGI observable on a VCPU, each bit of + * this byte indicating if the corresponding VCPU has + * generated this interrupt. This is a GICv2 feature only. + * + * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are + * the SGIs observable on VCPUn. + */ + u8 *irq_sgi_sources; + + /* + * Target CPU for each SPI: + * + * Array of available SPI, each byte indicating the target + * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32]. + */ + u8 *irq_spi_cpu; + + /* + * Reverse lookup of irq_spi_cpu for faster compute pending: + * + * Array of bitmaps, one per VCPU, describing if IRQn is + * routed to a particular VCPU. 
+ */ + struct vgic_bitmap *irq_spi_target; /* Bitmap indicating which CPU has something pending */ - unsigned long irq_pending_on_cpu; + unsigned long *irq_pending_on_cpu; #endif }; @@ -190,11 +241,11 @@ struct vgic_v3_cpu_if { struct vgic_cpu { #ifdef CONFIG_KVM_ARM_VGIC /* per IRQ to LR mapping */ - u8 vgic_irq_lr_map[VGIC_NR_IRQS]; + u8 *vgic_irq_lr_map; /* Pending interrupts on this VCPU */ DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); - DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); + unsigned long *pending_shared; /* Bitmap of used/free list registers */ DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); @@ -225,7 +276,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); int kvm_vgic_hyp_init(void); int kvm_vgic_init(struct kvm *kvm); int kvm_vgic_create(struct kvm *kvm); -int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_vgic_destroy(struct kvm *kvm); +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 807cbc46d73e..b7926bb9b444 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #if defined(CONFIG_ACPI) && defined(CONFIG_PM) struct acpi_device *acpi_dev_pm_get_node(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); -void acpi_dev_pm_detach(struct device *dev, bool power_off); #else static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) { @@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return -ENODEV; } -static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} #endif #ifdef CONFIG_ACPI diff --git a/include/linux/aer.h b/include/linux/aer.h index c826d1c28f9c..4fef65e57023 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -7,6 +7,8 @@ #ifndef _AER_H_ #define _AER_H_ +#include <linux/types.h> + #define AER_NONFATAL 0 #define AER_FATAL 1 #define AER_CORRECTABLE 2 diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 09a947e8bc87..642d6ae4030c 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -22,19 +22,6 @@ struct ata_port_info; struct ahci_host_priv; struct platform_device; -/* - * Note ahci_platform_data is deprecated, it is only kept around for use - * by the old da850 and spear13xx ahci code. - * New drivers should instead declare their own platform_driver struct, and - * use ahci_platform* functions in their own probe, suspend and resume methods. - */ -struct ahci_platform_data { - int (*init)(struct device *dev, void __iomem *addr); - void (*exit)(struct device *dev); - int (*suspend)(struct device *dev); - int (*resume)(struct device *dev); -}; - int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index fdd7e1b61f60..c324f5700d1a 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -44,10 +44,15 @@ struct amba_driver { const struct amba_id *id_table; }; +/* + * Constants for the designer field of the Peripheral ID register. 
When bit 7 + * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code. + */ enum amba_vendor { AMBA_VENDOR_ARM = 0x41, AMBA_VENDOR_ST = 0x80, AMBA_VENDOR_QCOM = 0x51, + AMBA_VENDOR_LSI = 0xb6, }; extern struct bus_type amba_bustype; diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index b9fde17f767c..5c618a084225 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h @@ -8,11 +8,6 @@ struct pata_platform_info { * spacing used by ata_std_ports(). */ unsigned int ioport_shift; - /* - * Indicate platform specific irq types and initial - * IRQ flags when call request_irq() - */ - unsigned int irq_flags; }; extern int __pata_platform_probe(struct device *dev, diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 4c7a4b2104bf..91b77f8d495d 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -1,6 +1,8 @@ #ifndef __LINUX_ATMEL_MCI_H #define __LINUX_ATMEL_MCI_H +#include <linux/types.h> + #define ATMCI_MAX_NR_SLOTS 2 /** diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h index 89a931babecf..b87c1c7c242a 100644 --- a/include/linux/atmel_tc.h +++ b/include/linux/atmel_tc.h @@ -44,12 +44,13 @@ struct atmel_tcb_config { /** * struct atmel_tc - information about a Timer/Counter Block * @pdev: physical device - * @iomem: resource associated with the I/O register * @regs: mapping through which the I/O registers can be accessed + * @id: block id * @tcb_config: configuration data from SoC * @irq: irq for each of the three channels * @clk: internal clock source for each of the three channels * @node: list node, for tclib internal use + * @allocated: if already used, for tclib internal use * * On some platforms, each TC channel has its own clocks and IRQs, * while on others, all TC channels share the same clock and IRQ. @@ -61,15 +62,16 @@ struct atmel_tcb_config { */ struct atmel_tc { struct platform_device *pdev; - struct resource *iomem; void __iomem *regs; + int id; const struct atmel_tcb_config *tcb_config; int irq[3]; struct clk *clk[3]; struct list_head node; + bool allocated; }; -extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name); +extern struct atmel_tc *atmel_tc_alloc(unsigned block); extern void atmel_tc_free(struct atmel_tc *tc); /* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ @@ -258,5 +260,10 @@ extern const u8 atmel_tc_divisors[5]; #define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ #define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ #define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ +#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \ + ATMEL_TC_CPAS | ATMEL_TC_CPBS | \ + ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \ + ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \ + /* all IRQs */ #endif diff --git a/include/linux/atomic.h b/include/linux/atomic.h index fef3a809e7cf..5b08a8540ecf 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -3,42 +3,6 @@ #define _LINUX_ATOMIC_H #include <asm/atomic.h> -/* - * Provide __deprecated wrappers for the new interface, avoid flag day changes. - * We need the ugly external functions to break header recursion hell. 
- */ -#ifndef smp_mb__before_atomic_inc -static inline void __deprecated smp_mb__before_atomic_inc(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_atomic_inc -static inline void __deprecated smp_mb__after_atomic_inc(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - -#ifndef smp_mb__before_atomic_dec -static inline void __deprecated smp_mb__before_atomic_dec(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_atomic_dec -static inline void __deprecated smp_mb__after_atomic_dec(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - /** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 089743ade734..9b0a15d06a4f 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -27,10 +27,13 @@ * counter raised only while it is under our special handling; * * iii. after the lockless scan step have selected a potential balloon page for - * isolation, re-test the page->mapping flags and the page ref counter + * isolation, re-test the PageBalloon mark and the PagePrivate flag * under the proper page lock, to ensure isolating a valid balloon page * (not yet isolated, nor under release procedure) * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. + * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple * set of exposed rules are satisfied while we are dealing with balloon pages @@ -54,43 +57,22 @@ * balloon driver as a page book-keeper for its registered balloon devices. */ struct balloon_dev_info { - void *balloon_device; /* balloon device descriptor */ - struct address_space *mapping; /* balloon special page->mapping */ unsigned long isolated_pages; /* # of isolated pages for migration */ spinlock_t pages_lock; /* Protection to pages list */ struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); -extern struct balloon_dev_info *balloon_devinfo_alloc( - void *balloon_dev_descriptor); -static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) -{ - kfree(b_dev_info); -} - -/* - * balloon_page_free - release a balloon page back to the page free lists - * @page: ballooned page to be set free - * - * This function must be used to properly set free an isolated/dequeued balloon - * page at the end of a sucessful page migration, or at the balloon driver's - * page release procedure. - */ -static inline void balloon_page_free(struct page *page) +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { - /* - * Balloon pages always get an extra refcount before being isolated - * and before being dequeued to help on sorting out fortuite colisions - * between a thread attempting to isolate and another thread attempting - * to release the very same balloon page. 
- * - * Before we handle the page back to Buddy, lets drop its extra refcnt. - */ - put_page(page); - __free_page(page); + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; } #ifdef CONFIG_BALLOON_COMPACTION @@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); extern int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode); -extern struct address_space -*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops); - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - kfree(balloon_mapping); -} /* - * page_flags_cleared - helper to perform balloon @page ->flags tests. - * - * As balloon pages are obtained from buddy and we do not play with page->flags - * at driver level (exception made when we get the page lock for compaction), - * we can safely identify a ballooned page by checking if the - * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also - * helps us skip ballooned pages that are locked for compaction or release, thus - * mitigating their racy check at balloon_page_movable() - */ -static inline bool page_flags_cleared(struct page *page) -{ - return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP); -} - -/* - * __is_movable_balloon_page - helper to perform @page mapping->flags tests + * __is_movable_balloon_page - helper to perform @page PageBalloon tests */ static inline bool __is_movable_balloon_page(struct page *page) { - struct address_space *mapping = page->mapping; - return mapping_balloon(mapping); + return PageBalloon(page); } /* - * balloon_page_movable - test page->mapping->flags to identify balloon pages - * that can be moved by compaction/migration. - * - * This function is used at core compaction's page isolation scheme, therefore - * most pages exposed to it are not enlisted as balloon pages and so, to avoid - * undesired side effects like racing against __free_pages(), we cannot afford - * holding the page locked while testing page->mapping->flags here. + * balloon_page_movable - test PageBalloon to identify balloon pages + * and PagePrivate to check that the page is not + * isolated and can be moved by compaction/migration. * * As we might return false positives in the case of a balloon page being just - * released under us, the page->mapping->flags need to be re-tested later, - * under the proper page lock, at the functions that will be coping with the - * balloon page case. + * released under us, this need to be re-tested later, under the page lock. */ static inline bool balloon_page_movable(struct page *page) { - /* - * Before dereferencing and testing mapping->flags, let's make sure - * this is not a page that uses ->mapping in a different way - */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) == 1) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page) && PagePrivate(page); } /* * isolated_balloon_page - identify an isolated balloon page on private * compaction/migration page lists. - * - * After a compaction thread isolates a balloon page for migration, it raises - * the page refcount to prevent concurrent compaction threads from re-isolating - * the same page. 
For that reason putback_movable_pages(), or other routines - * that need to identify isolated balloon pages on private pagelists, cannot - * rely on balloon_page_movable() to accomplish the task. */ static inline bool isolated_balloon_page(struct page *page) { - /* Already isolated balloon pages, by default, have a raised refcount */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) >= 2) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page); } /* * balloon_page_insert - insert a page into the balloon's page list and make - * the page->mapping assignment accordingly. + * the page->private assignment accordingly. + * @balloon : pointer to balloon device * @page : page to be assigned as a 'balloon page' - * @mapping : allocated special 'balloon_mapping' - * @head : balloon's device page list head * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before inserting a page into the balloon device. */ -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - page->mapping = mapping; - list_add(&page->lru, head); + __SetPageBalloon(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); } /* * balloon_page_delete - delete a page from balloon's page list and clear - * the page->mapping assignement accordingly. + * the page->private assignement accordingly. * @page : page to be released from balloon's page list * * Caller must ensure the page is locked and the spin_lock protecting balloon @@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page, */ static inline void balloon_page_delete(struct page *page) { - page->mapping = NULL; - list_del(&page->lru); + __ClearPageBalloon(page); + set_page_private(page, 0); + if (PagePrivate(page)) { + ClearPagePrivate(page); + list_del(&page->lru); + } } /* @@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page) */ static inline struct balloon_dev_info *balloon_page_device(struct page *page) { - struct address_space *mapping = page->mapping; - if (likely(mapping)) - return mapping->private_data; - - return NULL; + return (struct balloon_dev_info *)page_private(page); } static inline gfp_t balloon_mapping_gfp_mask(void) @@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER_MOVABLE; } -static inline bool balloon_compaction_check(void) -{ - return true; -} - #else /* !CONFIG_BALLOON_COMPACTION */ -static inline void *balloon_mapping_alloc(void *balloon_device, - const struct address_space_operations *a_ops) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - return; + __SetPageBalloon(page); + list_add(&page->lru, &balloon->pages); } -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_delete(struct page *page) { - list_add(&page->lru, head); + __ClearPageBalloon(page); + list_del(&page->lru); } -static inline void balloon_page_delete(struct page *page) +static inline bool __is_movable_balloon_page(struct page *page) { - list_del(&page->lru); + return false; } static inline bool 
balloon_page_movable(struct page *page) @@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER; } -static inline bool balloon_compaction_check(void) -{ - return false; -} #endif /* CONFIG_BALLOON_COMPACTION */ #endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0272e49135d0..729f48e6b20b 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -267,7 +267,7 @@ struct bcma_device { u8 core_unit; u32 addr; - u32 addr1; + u32 addr_s[8]; u32 wrap; void __iomem *io_addr; @@ -323,6 +323,8 @@ struct bcma_bus { struct pci_dev *host_pci; /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ struct sdio_func *host_sdio; + /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ + struct platform_device *host_pdev; }; struct bcma_chipinfo chipinfo; @@ -332,10 +334,10 @@ struct bcma_bus { struct bcma_device *mapped_core; struct list_head cores; u8 nr_cores; - u8 init_done:1; u8 num; struct bcma_drv_cc drv_cc; + struct bcma_drv_cc_b drv_cc_b; struct bcma_drv_pci drv_pci[2]; struct bcma_drv_pcie2 drv_pcie2; struct bcma_drv_mips drv_mips; diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 63d105cd14a3..db6fa217f98b 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -644,6 +644,12 @@ struct bcma_drv_cc { #endif }; +struct bcma_drv_cc_b { + struct bcma_device *core; + u8 setup_done:1; + void __iomem *mii; +}; + /* Register access */ #define bcma_cc_read32(cc, offset) \ bcma_read32((cc)->core, offset) @@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid); extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); +void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value); + #endif /* LINUX_BCMA_DRIVER_CC_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index 917dcd7965e7..e64ae7bf80a1 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -39,6 +39,11 @@ #define BCMA_RESET_CTL_RESET 0x0001 #define BCMA_RESET_ST 0x0804 +#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001 +#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002 + /* BCMA PCI config space registers. */ #define BCMA_PCI_PMCSR 0x44 #define BCMA_PCI_PE 0x100 diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 4203c5593b9f..f24d245f8394 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -10,6 +10,7 @@ struct bcma_soc { }; int __init bcma_host_soc_register(struct bcma_soc *soc); +int __init bcma_host_soc_init(struct bcma_soc *soc); int bcma_bus_register(struct bcma_bus *bus); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index cbc5833fb221..be5fd38bd5a0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w); */ #include <asm/bitops.h> -/* - * Provide __deprecated wrappers for the new interface, avoid flag day changes. - * We need the ugly external functions to break header recursion hell. 
- */ -#ifndef smp_mb__before_clear_bit -static inline void __deprecated smp_mb__before_clear_bit(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_clear_bit -static inline void __deprecated smp_mb__after_clear_bit(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a1e31f274fcd..c13a0c09faea 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,6 +140,7 @@ enum { }; struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +void blk_mq_finish_init(struct request_queue *q); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 518b46555b80..87be398166d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1564,7 +1564,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q, } static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) { - return 0; + return NULL; } static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h new file mode 100644 index 000000000000..3cf91754a957 --- /dev/null +++ b/include/linux/bpf.h @@ -0,0 +1,136 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _LINUX_BPF_H +#define _LINUX_BPF_H 1 + +#include <uapi/linux/bpf.h> +#include <linux/workqueue.h> +#include <linux/file.h> + +struct bpf_map; + +/* map is generic key/value storage optionally accesible by eBPF programs */ +struct bpf_map_ops { + /* funcs callable from userspace (via syscall) */ + struct bpf_map *(*map_alloc)(union bpf_attr *attr); + void (*map_free)(struct bpf_map *); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + + /* funcs callable from userspace and from eBPF programs */ + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_delete_elem)(struct bpf_map *map, void *key); +}; + +struct bpf_map { + atomic_t refcnt; + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + struct bpf_map_ops *ops; + struct work_struct work; +}; + +struct bpf_map_type_list { + struct list_head list_node; + struct bpf_map_ops *ops; + enum bpf_map_type type; +}; + +void bpf_register_map_type(struct bpf_map_type_list *tl); +void bpf_map_put(struct bpf_map *map); +struct bpf_map *bpf_map_get(struct fd f); + +/* function argument constraints */ +enum bpf_arg_type { + ARG_ANYTHING = 0, /* any argument is ok */ + + /* the following constraints used to prototype + * bpf_map_lookup/update/delete_elem() functions + */ + ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ + ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ + ARG_CONST_STACK_SIZE, /* number of 
bytes accessed from stack */ +}; + +/* type of values returned from helper functions */ +enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ + RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ +}; + +/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs + * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL + * instructions after verifying + */ +struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; + enum bpf_return_type ret_type; + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; +}; + +/* bpf_context is intentionally undefined structure. Pointer to bpf_context is + * the first argument to eBPF programs. + * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' + */ +struct bpf_context; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2 +}; + +struct bpf_verifier_ops { + /* return eBPF function prototype for verification */ + const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); + + /* return true if 'size' wide access at offset 'off' within bpf_context + * with 'type' (read or write) is allowed + */ + bool (*is_valid_access)(int off, int size, enum bpf_access_type type); +}; + +struct bpf_prog_type_list { + struct list_head list_node; + struct bpf_verifier_ops *ops; + enum bpf_prog_type type; +}; + +void bpf_register_prog_type(struct bpf_prog_type_list *tl); + +struct bpf_prog; + +struct bpf_prog_aux { + atomic_t refcnt; + bool is_gpl_compatible; + enum bpf_prog_type prog_type; + struct bpf_verifier_ops *ops; + struct bpf_map **used_maps; + u32 used_map_cnt; + struct bpf_prog *prog; + struct work_struct work; +}; + +void bpf_prog_put(struct bpf_prog *prog); +struct bpf_prog *bpf_prog_get(u32 ufd); +/* verify correctness of eBPF program */ +int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); + +#endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 61219b9b3445..7ccd928cc1f2 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -13,7 +13,11 @@ #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 +#define PHY_ID_BCM7425 0x03625e60 +#define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7445 0x600d8510 @@ -21,9 +25,9 @@ #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 #define PHY_BCM_OUI_3 0x03625c00 -#define PHY_BCM_OUI_4 0x600d0000 +#define PHY_BCM_OUI_4 0x600d8400 #define PHY_BCM_OUI_5 0x03625e00 - +#define PHY_BCM_OUI_6 0xae025000 #define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 #define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 @@ -38,7 +42,8 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 /* Broadcom BCM7xxx specific workarounds */ -#define PHY_BRCM_100MBPS_WAR 0x00010000 +#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) +#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) #define PHY_BCM_FLAGS_VALID 0x80000000 /* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ @@ -92,4 +97,130 @@ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +/* + * Broadcom LED source encodings. These are used in BCM5461, BCM5481, + * BCM5482, and possibly some others. 
+ */ +#define BCM_LED_SRC_LINKSPD1 0x0 +#define BCM_LED_SRC_LINKSPD2 0x1 +#define BCM_LED_SRC_XMITLED 0x2 +#define BCM_LED_SRC_ACTIVITYLED 0x3 +#define BCM_LED_SRC_FDXLED 0x4 +#define BCM_LED_SRC_SLAVE 0x5 +#define BCM_LED_SRC_INTR 0x6 +#define BCM_LED_SRC_QUALITY 0x7 +#define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_MULTICOLOR1 0xa +#define BCM_LED_SRC_OPENSHORT 0xb +#define BCM_LED_SRC_OFF 0xe /* Tied high */ +#define BCM_LED_SRC_ON 0xf /* Tied low */ + + +/* + * BCM5482: Shadow registers + * Shadow values go into bits [14:10] of register 0x1c to select a shadow + * register to access. + */ +/* 00101: Spare Control Register 3 */ +#define BCM54XX_SHD_SCR3 0x05 +#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 +#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 +#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 + +/* 01010: Auto Power-Down */ +#define BCM54XX_SHD_APD 0x0a +#define BCM54XX_SHD_APD_EN 0x0020 + +#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ + /* LED3 / ~LINKSPD[2] selector */ +#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) + /* LED1 / ~LINKSPD[1] selector */ +#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) +#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ +#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ +#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ +#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ + + +/* + * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) + */ +#define MII_BCM54XX_EXP_AADJ1CH0 0x001f +#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 +#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 +#define MII_BCM54XX_EXP_AADJ1CH3 0x601f +#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_BCM54XX_EXP_EXP08 0x0F08 +#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 +#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP75 0x0f75 +#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c +#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 +#define MII_BCM54XX_EXP_EXP96 0x0f96 +#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 +#define MII_BCM54XX_EXP_EXP97 0x0f97 +#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c + +/* + * BCM5482: Secondary SerDes registers + */ +#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ +#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ +#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ +#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ +#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ + + +/*****************************************************************************/ +/* Fast Ethernet Transceiver definitions. 
*/ +/*****************************************************************************/ + +#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ +#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ +#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ +#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ +#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ +#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ + +#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ +#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ + + +/*** Shadow register definitions ***/ + +#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ +#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ + +#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ +#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 +#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 + +#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ +#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ + +/* + * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T + * 0x1c shadow registers. + */ +static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) +{ + phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); + return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); +} + +static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, + u16 val) +{ + return phy_write(phydev, MII_BCM54XX_SHD, + MII_BCM54XX_SHD_WRITE | + MII_BCM54XX_SHD_VAL(shadow) | + MII_BCM54XX_SHD_DATA(val)); +} + +#define BRCM_CL45VEN_EEE_CONTROL 0x803d +#define LPI_FEATURE_EN 0x8000 +#define LPI_FEATURE_EN_DIG1000X 0x4000 + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/ccp.h b/include/linux/ccp.h index ebcc9d146219..7f437036baa4 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -27,6 +27,13 @@ struct ccp_cmd; defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) /** + * ccp_present - check if a CCP device is present + * + * Returns zero if a CCP device is present, -ENODEV otherwise. 
+ */ +int ccp_present(void); + +/** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed @@ -53,6 +60,11 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd); #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ +static inline int ccp_present(void) +{ + return -ENODEV; +} + static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd) { return -ENODEV; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b5223c570eba..1d5196889048 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -27,7 +27,6 @@ struct cgroup_root; struct cgroup_subsys; -struct inode; struct cgroup; extern int cgroup_init_early(void); @@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p); extern int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); -extern int proc_cgroup_show(struct seq_file *, void *); +extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, @@ -161,11 +161,6 @@ static inline void css_put(struct cgroup_subsys_state *css) /* bits in struct cgroup flags field */ enum { - /* - * Control Group has previously had a child cgroup or a task, - * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) - */ - CGRP_RELEASABLE, /* Control Group requires release notifications to userspace */ CGRP_NOTIFY_ON_RELEASE, /* @@ -235,13 +230,6 @@ struct cgroup { struct list_head e_csets[CGROUP_SUBSYS_COUNT]; /* - * Linked list running through all cgroups that can - * potentially be reaped by the release agent. Protected by - * release_list_lock - */ - struct list_head release_list; - - /* * list of pidlists, up to two for each namespace (one for procs, one * for tasks); created on demand. 
*/ @@ -250,6 +238,9 @@ struct cgroup { /* used to wait for offlining of csses */ wait_queue_head_t offline_waitq; + + /* used to schedule release agent */ + struct work_struct release_agent_work; }; #define MAX_CGROUP_ROOT_NAMELEN 64 @@ -536,13 +527,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp) return !list_empty(&cgrp->cset_links); } -/* returns ino associated with a cgroup, 0 indicates unmounted root */ +/* returns ino associated with a cgroup */ static inline ino_t cgroup_ino(struct cgroup *cgrp) { - if (cgrp->kn) - return cgrp->kn->ino; - else - return 0; + return cgrp->kn->ino; } /* cft/css accessors for cftype->write() operation */ diff --git a/include/linux/clk.h b/include/linux/clk.h index fb5e097d8f72..afb44bfaf8d1 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -238,7 +238,7 @@ void clk_put(struct clk *clk); /** * devm_clk_put - "free" a managed clock source - * @dev: device used to acuqire the clock + * @dev: device used to acquire the clock * @clk: clock source acquired with devm_clk_get() * * Note: drivers must ensure that all clk_enable calls made on this diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index de4268d4987a..c8e3b3d1eded 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -125,6 +125,7 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */ #define AT91_PMC_PLLADIV2_OFF (0 << 12) #define AT91_PMC_PLLADIV2_ON (1 << 12) +#define AT91_PMC_H32MXDIV BIT(24) #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ diff --git a/include/linux/com20020.h b/include/linux/com20020.h index 5dcfb944b6ce..85898995b234 100644 --- a/include/linux/com20020.h +++ b/include/linux/com20020.h @@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops; #define BUS_ALIGN 1 #endif +#define PLX_PCI_MAX_CARDS 2 + +struct com20020_pci_channel_map { + u32 bar; + u32 offset; + u32 size; /* 0x00 - auto, e.g. 
length of entire bar */ +}; + +struct com20020_pci_card_info { + const char *name; + int devcount; + + struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; + + unsigned int flags; +}; + +struct com20020_priv { + struct com20020_pci_card_info *ci; + struct list_head list_dev; +}; + +struct com20020_dev { + struct list_head list; + struct net_device *dev; + + struct com20020_priv *pci_priv; + int index; +}; #define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ #define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 01e3132820da..60bdf8dc02a3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,14 +2,24 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ +/* compaction didn't start as it was deferred due to past failures */ +#define COMPACT_DEFERRED 0 /* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 0 +#define COMPACT_SKIPPED 1 /* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 1 +#define COMPACT_CONTINUE 2 /* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 2 +#define COMPACT_PARTIAL 3 /* The full zone was compacted */ -#define COMPACT_COMPLETE 3 +#define COMPACT_COMPLETE 4 + +/* Used to signal whether compaction detected need_sched() or lock contention */ +/* No contention detected */ +#define COMPACT_CONTENDED_NONE 0 +/* Either need_sched() was true or fatal signal pending */ +#define COMPACT_CONTENDED_SCHED 1 +/* Zone lock or lru_lock was contended in async compaction */ +#define COMPACT_CONTENDED_LOCK 2 #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; @@ -22,7 +32,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended); + enum migrate_mode mode, int *contended, + struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -91,7 +102,8 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, int *contended, + struct zone **candidate_zone) { return COMPACT_CONTINUE; } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 95978ad7fcdd..b2d9a43012b2 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys; extern void cpu_hotplug_begin(void); extern void cpu_hotplug_done(void); extern void get_online_cpus(void); +extern bool try_get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); @@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu); static inline void cpu_hotplug_begin(void) {} static inline void cpu_hotplug_done(void) {} #define get_online_cpus() do { } while (0) +#define try_get_online_cpus() true #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7d1955afa62c..138336b6bb04 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -112,6 +112,9 @@ struct cpufreq_policy { spinlock_t transition_lock; wait_queue_head_t transition_wait; struct task_struct *transition_task; /* Task which is doing the transition */ + + /* For cpufreq driver's internal use */ + void *driver_data; }; /* Only for ACPI */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index ade2390ffe92..2f073db7392e 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -86,19 +86,20 @@ extern void __cpuset_memory_pressure_bump(void); extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); -extern int proc_cpuset_show(struct seq_file *, void *); +extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); extern int cpuset_mem_spread_node(void); extern int cpuset_slab_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { - return current->flags & PF_SPREAD_PAGE; + return task_spread_page(current); } static inline int cpuset_do_slab_mem_spread(void) { - return current->flags & PF_SPREAD_SLAB; + return task_spread_slab(current); } extern int current_cpuset_is_being_rebound(void); diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h deleted file mode 100644 index 362bf19d6cf1..000000000000 --- a/include/linux/cycx_x25.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef _CYCX_X25_H -#define _CYCX_X25_H -/* -* cycx_x25.h Cyclom X.25 firmware API definitions. -* -* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* 2000/04/02 acme dprintk and cycx_debug -* 1999/01/03 acme judicious use of data types -* 1999/01/02 acme #define X25_ACK_N3 0x4411 -* 1998/12/28 acme cleanup: lot'o'things removed -* commands listed, -* TX25Cmd & TX25Config structs -* typedef'ed -*/ -#ifndef PACKED -#define PACKED __attribute__((packed)) -#endif - -/* X.25 shared memory layout. */ -#define X25_MBOX_OFFS 0x300 /* general mailbox block */ -#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */ - -/* Debug */ -#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a) - -extern unsigned int cycx_debug; - -/* Data Structures */ -/* X.25 Command Block. */ -struct cycx_x25_cmd { - u16 command; - u16 link; /* values: 0 or 1 */ - u16 len; /* values: 0 thru 0x205 (517) */ - u32 buf; -} PACKED; - -/* Defines for the 'command' field. 
*/ -#define X25_CONNECT_REQUEST 0x4401 -#define X25_CONNECT_RESPONSE 0x4402 -#define X25_DISCONNECT_REQUEST 0x4403 -#define X25_DISCONNECT_RESPONSE 0x4404 -#define X25_DATA_REQUEST 0x4405 -#define X25_ACK_TO_VC 0x4406 -#define X25_INTERRUPT_RESPONSE 0x4407 -#define X25_CONFIG 0x4408 -#define X25_CONNECT_INDICATION 0x4409 -#define X25_CONNECT_CONFIRM 0x440A -#define X25_DISCONNECT_INDICATION 0x440B -#define X25_DISCONNECT_CONFIRM 0x440C -#define X25_DATA_INDICATION 0x440E -#define X25_INTERRUPT_INDICATION 0x440F -#define X25_ACK_FROM_VC 0x4410 -#define X25_ACK_N3 0x4411 -#define X25_CONNECT_COLLISION 0x4413 -#define X25_N3WIN 0x4414 -#define X25_LINE_ON 0x4415 -#define X25_LINE_OFF 0x4416 -#define X25_RESET_REQUEST 0x4417 -#define X25_LOG 0x4500 -#define X25_STATISTIC 0x4600 -#define X25_TRACE 0x4700 -#define X25_N2TRACEXC 0x4702 -#define X25_N3TRACEXC 0x4703 - -/** - * struct cycx_x25_config - cyclom2x x25 firmware configuration - * @link - link number - * @speed - line speed - * @clock - internal/external - * @n2 - # of level 2 retransm.(values: 1 thru FF) - * @n2win - level 2 window (values: 1 thru 7) - * @n3win - level 3 window (values: 1 thru 7) - * @nvc - # of logical channels (values: 1 thru 64) - * @pktlen - level 3 packet length - log base 2 of size - * @locaddr - my address - * @remaddr - remote address - * @t1 - time, in seconds - * @t2 - time, in seconds - * @t21 - time, in seconds - * @npvc - # of permanent virt. circuits (1 thru nvc) - * @t23 - time, in seconds - * @flags - see dosx25.doc, in portuguese, for details - */ -struct cycx_x25_config { - u8 link; - u8 speed; - u8 clock; - u8 n2; - u8 n2win; - u8 n3win; - u8 nvc; - u8 pktlen; - u8 locaddr; - u8 remaddr; - u16 t1; - u16 t2; - u8 t21; - u8 npvc; - u8 t23; - u8 flags; -} PACKED; - -struct cycx_x25_stats { - u16 rx_crc_errors; - u16 rx_over_errors; - u16 n2_tx_frames; - u16 n2_rx_frames; - u16 tx_timeouts; - u16 rx_timeouts; - u16 n3_tx_packets; - u16 n3_rx_packets; - u16 tx_aborts; - u16 rx_aborts; -} PACKED; -#endif /* _CYCX_X25_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index e4ae2ad48d07..b2a2a08523bf 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,7 +11,6 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> -struct nameidata; struct path; struct vfsmount; @@ -55,6 +54,7 @@ struct qstr { #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } #define hashlen_hash(hashlen) ((u32) (hashlen)) #define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash)) struct dentry_stat_t { long nr_dentry; @@ -225,11 +225,6 @@ struct dentry_operations { extern seqlock_t rename_lock; -static inline int dname_external(const struct dentry *dentry) -{ - return dentry->d_name.name != dentry->d_iname; -} - /* * These are the low-level FS interfaces to the dcache.. */ @@ -253,7 +248,7 @@ extern struct dentry * d_obtain_root(struct inode *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); -extern int d_invalidate(struct dentry *); +extern void d_invalidate(struct dentry *); /* only used at mount-time */ extern struct dentry * d_make_root(struct inode *); @@ -268,7 +263,6 @@ extern void d_prune_aliases(struct inode *); /* test whether we have any submounts in a subdir tree */ extern int have_submounts(struct dentry *); -extern int check_submounts_and_drop(struct dentry *); /* * This adds the entry to the hash queues. 
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h new file mode 100644 index 000000000000..c0a360e99f64 --- /dev/null +++ b/include/linux/devcoredump.h @@ -0,0 +1,35 @@ +#ifndef __DEVCOREDUMP_H +#define __DEVCOREDUMP_H + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#ifdef CONFIG_DEV_COREDUMP +void dev_coredumpv(struct device *dev, const void *data, size_t datalen, + gfp_t gfp); + +void dev_coredumpm(struct device *dev, struct module *owner, + const void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen), + void (*free)(const void *data)); +#else +static inline void dev_coredumpv(struct device *dev, const void *data, + size_t datalen, gfp_t gfp) +{ + vfree(data); +} + +static inline void +dev_coredumpm(struct device *dev, struct module *owner, + const void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + const void *data, size_t datalen), + void (*free)(const void *data)) +{ + free(data); +} +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 43d183aeb25b..a608e237f0a8 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -607,8 +607,8 @@ extern int devres_release_group(struct device *dev, void *id); extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); -extern char *devm_kasprintf(struct device *dev, gfp_t gfp, - const char *fmt, ...); +extern __printf(3, 4) +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 931b70986272..d5d388160f42 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -263,6 +263,32 @@ struct dma_attrs; #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ dma_unmap_sg(dev, sgl, nents, dir) +#else +static inline void *dma_alloc_writecombine(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); +} + +static inline void dma_free_writecombine(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); +} + +static inline int dma_mmap_writecombine(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} #endif /* CONFIG_HAVE_DMA_ATTRS */ #ifdef CONFIG_NEED_DMA_MAP_STATE diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1f9e642c66ad..212c5b9ac106 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -900,18 +900,6 @@ static inline void dmaengine_put(void) } #endif -#ifdef CONFIG_NET_DMA -#define net_dmaengine_get() dmaengine_get() -#define net_dmaengine_put() dmaengine_put() -#else -static inline void net_dmaengine_get(void) -{ -} -static inline void net_dmaengine_put(void) -{ -} -#endif - #ifdef 
CONFIG_ASYNC_TX_DMA #define async_dmaengine_get() dmaengine_get() #define async_dmaengine_put() dmaengine_put() @@ -933,16 +921,8 @@ async_dma_find_channel(enum dma_transaction_type type) return NULL; } #endif /* CONFIG_ASYNC_TX_DMA */ - -dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, - void *dest, void *src, size_t len); -dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, - struct page *page, unsigned int offset, void *kdata, size_t len); -dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, - struct page *dest_pg, unsigned int dest_off, struct page *src_pg, - unsigned int src_off, size_t len); void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, - struct dma_chan *chan); + struct dma_chan *chan); static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 2fe93b26b42f..4f1bbc68cd1b 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -42,7 +42,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, #if defined(CONFIG_DYNAMIC_DEBUG) extern int ddebug_remove_module(const char *mod_name); extern __printf(2, 3) -int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); +void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); extern int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *modname); @@ -50,15 +50,15 @@ extern int ddebug_dyndbg_module_param_cb(char *param, char *val, struct device; extern __printf(3, 4) -int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, - const char *fmt, ...); +void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, + const char *fmt, ...); struct net_device; extern __printf(3, 4) -int __dynamic_netdev_dbg(struct _ddebug *descriptor, - const struct net_device *dev, - const char *fmt, ...); +void __dynamic_netdev_dbg(struct _ddebug *descriptor, + const struct net_device *dev, + const char *fmt, ...); #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __aligned(8) \ diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 5621547d631b..a4be70398ce1 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count) { BUG_ON(count > DQL_MAX_OBJECT); - dql->num_queued += count; dql->last_obj_cnt = count; + + /* We want to force a write first, so that cpu do not attempt + * to get cache line containing last_obj_cnt, num_queued, adj_limit + * in Shared state, but directly does a Request For Ownership + * It is only a hint, we use barrier() only. + */ + barrier(); + + dql->num_queued += count; } /* Returns how many objects can be queued, < 0 indicates over limit. */ static inline int dql_avail(const struct dql *dql) { - return dql->adj_limit - dql->num_queued; + return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); } /* Record number of completed objects and recalculate the limit. 
*/ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 9c5529dc6d07..733980fce8e3 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,6 +29,7 @@ #include <asm/bitsperlong.h> #ifdef __KERNEL__ +u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e658229fee39..c1a2d60dfb82 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -257,6 +257,10 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, + const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, + const struct ethtool_tunable *, const void *); }; diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h index 8900fdf511c6..0b17ad43fbfc 100644 --- a/include/linux/extcon/extcon-gpio.h +++ b/include/linux/extcon/extcon-gpio.h @@ -34,8 +34,10 @@ * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). * @state_on: print_state is overriden with state_on if attached. * If NULL, default method of extcon class is used. - * @state_off: print_state is overriden with state_on if detached. + * @state_off: print_state is overriden with state_off if detached. * If NUll, default method of extcon class is used. + * @check_on_resume: Boolean describing whether to check the state of gpio + * while resuming from sleep. * * Note that in order for state_on or state_off to be valid, both state_on * and state_off should be not NULL. If at least one of them is NULL, diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h deleted file mode 100644 index 030526bf8d79..000000000000 --- a/include/linux/extcon/sm5502.h +++ /dev/null @@ -1,287 +0,0 @@ -/* - * sm5502.h - * - * Copyright (c) 2014 Samsung Electronics Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __LINUX_EXTCON_SM5502_H -#define __LINUX_EXTCON_SM5502_H - -enum sm5502_types { - TYPE_SM5502, -}; - -/* SM5502 registers */ -enum sm5502_reg { - SM5502_REG_DEVICE_ID = 0x01, - SM5502_REG_CONTROL, - SM5502_REG_INT1, - SM5502_REG_INT2, - SM5502_REG_INTMASK1, - SM5502_REG_INTMASK2, - SM5502_REG_ADC, - SM5502_REG_TIMING_SET1, - SM5502_REG_TIMING_SET2, - SM5502_REG_DEV_TYPE1, - SM5502_REG_DEV_TYPE2, - SM5502_REG_BUTTON1, - SM5502_REG_BUTTON2, - SM5502_REG_CAR_KIT_STATUS, - SM5502_REG_RSVD1, - SM5502_REG_RSVD2, - SM5502_REG_RSVD3, - SM5502_REG_RSVD4, - SM5502_REG_MANUAL_SW1, - SM5502_REG_MANUAL_SW2, - SM5502_REG_DEV_TYPE3, - SM5502_REG_RSVD5, - SM5502_REG_RSVD6, - SM5502_REG_RSVD7, - SM5502_REG_RSVD8, - SM5502_REG_RSVD9, - SM5502_REG_RESET, - SM5502_REG_RSVD10, - SM5502_REG_RESERVED_ID1, - SM5502_REG_RSVD11, - SM5502_REG_RSVD12, - SM5502_REG_RESERVED_ID2, - SM5502_REG_RSVD13, - SM5502_REG_OCP, - SM5502_REG_RSVD14, - SM5502_REG_RSVD15, - SM5502_REG_RSVD16, - SM5502_REG_RSVD17, - SM5502_REG_RSVD18, - SM5502_REG_RSVD19, - SM5502_REG_RSVD20, - SM5502_REG_RSVD21, - SM5502_REG_RSVD22, - SM5502_REG_RSVD23, - SM5502_REG_RSVD24, - SM5502_REG_RSVD25, - SM5502_REG_RSVD26, - SM5502_REG_RSVD27, - SM5502_REG_RSVD28, - SM5502_REG_RSVD29, - SM5502_REG_RSVD30, - SM5502_REG_RSVD31, - SM5502_REG_RSVD32, - SM5502_REG_RSVD33, - SM5502_REG_RSVD34, - SM5502_REG_RSVD35, - SM5502_REG_RSVD36, - SM5502_REG_RESERVED_ID3, - - SM5502_REG_END, -}; - -/* Define SM5502 MASK/SHIFT constant */ -#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0 -#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3 -#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT) -#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT) - -#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0 -#define SM5502_REG_CONTROL_WAIT_SHIFT 1 -#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2 -#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3 -#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4 -#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT) -#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT) -#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT) -#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT) -#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT) - -#define SM5502_REG_INTM1_ATTACH_SHIFT 0 -#define SM5502_REG_INTM1_DETACH_SHIFT 1 -#define SM5502_REG_INTM1_KP_SHIFT 2 -#define SM5502_REG_INTM1_LKP_SHIFT 3 -#define SM5502_REG_INTM1_LKR_SHIFT 4 -#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5 -#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6 -#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7 -#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT) -#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT) -#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT) -#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT) -#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT) -#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT) -#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT) -#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT) - -#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0 -#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1 -#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2 -#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3 -#define 
SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4 -#define SM5502_REG_INTM2_MHL_SHIFT 5 -#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT) -#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT) -#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT) -#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT) -#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT) -#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT) - -#define SM5502_REG_ADC_SHIFT 0 -#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT) - -#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4 -#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT) -#define TIMING_KEY_PRESS_100MS 0x0 -#define TIMING_KEY_PRESS_200MS 0x1 -#define TIMING_KEY_PRESS_300MS 0x2 -#define TIMING_KEY_PRESS_400MS 0x3 -#define TIMING_KEY_PRESS_500MS 0x4 -#define TIMING_KEY_PRESS_600MS 0x5 -#define TIMING_KEY_PRESS_700MS 0x6 -#define TIMING_KEY_PRESS_800MS 0x7 -#define TIMING_KEY_PRESS_900MS 0x8 -#define TIMING_KEY_PRESS_1000MS 0x9 -#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0 -#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT) -#define TIMING_ADC_DET_50MS 0x0 -#define TIMING_ADC_DET_100MS 0x1 -#define TIMING_ADC_DET_150MS 0x2 -#define TIMING_ADC_DET_200MS 0x3 -#define TIMING_ADC_DET_300MS 0x4 -#define TIMING_ADC_DET_400MS 0x5 -#define TIMING_ADC_DET_500MS 0x6 -#define TIMING_ADC_DET_600MS 0x7 -#define TIMING_ADC_DET_700MS 0x8 -#define TIMING_ADC_DET_800MS 0x9 -#define TIMING_ADC_DET_900MS 0xA -#define TIMING_ADC_DET_1000MS 0xB - -#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4 -#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT) -#define TIMING_SW_WAIT_10MS 0x0 -#define TIMING_SW_WAIT_30MS 0x1 -#define TIMING_SW_WAIT_50MS 0x2 -#define TIMING_SW_WAIT_70MS 0x3 -#define TIMING_SW_WAIT_90MS 0x4 -#define TIMING_SW_WAIT_110MS 0x5 -#define TIMING_SW_WAIT_130MS 0x6 -#define TIMING_SW_WAIT_150MS 0x7 -#define TIMING_SW_WAIT_170MS 0x8 -#define TIMING_SW_WAIT_190MS 0x9 -#define TIMING_SW_WAIT_210MS 0xA -#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0 -#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT) -#define TIMING_LONG_KEY_300MS 0x0 -#define TIMING_LONG_KEY_400MS 0x1 -#define TIMING_LONG_KEY_500MS 0x2 -#define TIMING_LONG_KEY_600MS 0x3 -#define TIMING_LONG_KEY_700MS 0x4 -#define TIMING_LONG_KEY_800MS 0x5 -#define TIMING_LONG_KEY_900MS 0x6 -#define TIMING_LONG_KEY_1000MS 0x7 -#define TIMING_LONG_KEY_1100MS 0x8 -#define TIMING_LONG_KEY_1200MS 0x9 -#define TIMING_LONG_KEY_1300MS 0xA -#define TIMING_LONG_KEY_1400MS 0xB -#define TIMING_LONG_KEY_1500MS 0xC - -#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0 -#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1 -#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2 -#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3 -#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4 -#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5 -#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6 -#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7 -#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT) -#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT) -#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT) -#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << 
SM5502_REG_DEV_TYPE1_UART_SHIFT) -#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT) -#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT) -#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT) -#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT) - -#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0 -#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1 -#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2 -#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3 -#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4 -#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5 -#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6 -#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT) -#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT) -#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT) -#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT) -#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT) -#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT) -#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT) - -#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0 -#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2 -#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5 -#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT) -#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT) -#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT) -#define VBUSIN_SWITCH_OPEN 0x0 -#define VBUSIN_SWITCH_VBUSOUT 0x1 -#define VBUSIN_SWITCH_MIC 0x2 -#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3 -#define DM_DP_CON_SWITCH_OPEN 0x0 -#define DM_DP_CON_SWITCH_USB 0x1 -#define DM_DP_CON_SWITCH_AUDIO 0x2 -#define DM_DP_CON_SWITCH_UART 0x3 -#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ - | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) -#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ - | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) -#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ - | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) -#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ - | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) - -/* SM5502 Interrupts */ -enum sm5502_irq { - /* INT1 */ - SM5502_IRQ_INT1_ATTACH, - SM5502_IRQ_INT1_DETACH, - SM5502_IRQ_INT1_KP, - SM5502_IRQ_INT1_LKP, - SM5502_IRQ_INT1_LKR, - SM5502_IRQ_INT1_OVP_EVENT, - SM5502_IRQ_INT1_OCP_EVENT, - SM5502_IRQ_INT1_OVP_OCP_DIS, - - /* INT2 */ - SM5502_IRQ_INT2_VBUS_DET, - SM5502_IRQ_INT2_REV_ACCE, - SM5502_IRQ_INT2_ADC_CHG, - SM5502_IRQ_INT2_STUCK_KEY, - SM5502_IRQ_INT2_STUCK_KEY_RCV, - SM5502_IRQ_INT2_MHL, - - SM5502_IRQ_NUM, -}; - -#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0) -#define SM5502_IRQ_INT1_DETACH_MASK BIT(1) -#define SM5502_IRQ_INT1_KP_MASK BIT(2) -#define SM5502_IRQ_INT1_LKP_MASK BIT(3) -#define SM5502_IRQ_INT1_LKR_MASK BIT(4) -#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5) -#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6) -#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7) -#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0) -#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1) -#define 
SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2) -#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3) -#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4) -#define SM5502_IRQ_INT2_MHL_MASK BIT(5) - -#endif /* __LINUX_EXTCON_SM5502_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 08ed2b0a96e6..860313a33a43 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -15,8 +15,9 @@ #include <linux/types.h> #define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ -#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */ -#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ +#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */ +#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ +#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) @@ -85,6 +86,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_FSCK_FLAG 0x00000010 #define CP_ERROR_FLAG 0x00000008 #define CP_COMPACT_SUM_FLAG 0x00000004 #define CP_ORPHAN_PRESENT_FLAG 0x00000002 diff --git a/include/linux/filter.h b/include/linux/filter.h index a5227ab8ccb1..ca95abd2bed1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -4,58 +4,24 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ +#include <stdarg.h> + #include <linux/atomic.h> #include <linux/compat.h> #include <linux/skbuff.h> +#include <linux/linkage.h> +#include <linux/printk.h> #include <linux/workqueue.h> -#include <uapi/linux/filter.h> -/* Internally used and optimized filter representation with extended - * instruction set based on top of classic BPF. - */ +#include <asm/cacheflush.h> -/* instruction classes */ -#define BPF_ALU64 0x07 /* alu mode in double word width */ - -/* ld/ldx fields */ -#define BPF_DW 0x18 /* double word */ -#define BPF_XADD 0xc0 /* exclusive add */ - -/* alu/jmp fields */ -#define BPF_MOV 0xb0 /* mov reg to reg */ -#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ - -/* change endianness of a register */ -#define BPF_END 0xd0 /* flags for endianness conversion: */ -#define BPF_TO_LE 0x00 /* convert to little-endian */ -#define BPF_TO_BE 0x08 /* convert to big-endian */ -#define BPF_FROM_LE BPF_TO_LE -#define BPF_FROM_BE BPF_TO_BE - -#define BPF_JNE 0x50 /* jump != */ -#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ -#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ -#define BPF_CALL 0x80 /* function call */ -#define BPF_EXIT 0x90 /* function return */ - -/* Register numbers */ -enum { - BPF_REG_0 = 0, - BPF_REG_1, - BPF_REG_2, - BPF_REG_3, - BPF_REG_4, - BPF_REG_5, - BPF_REG_6, - BPF_REG_7, - BPF_REG_8, - BPF_REG_9, - BPF_REG_10, - __MAX_BPF_REG, -}; +#include <uapi/linux/filter.h> +#include <uapi/linux/bpf.h> -/* BPF has 10 general purpose 64-bit registers and stack frame. */ -#define MAX_BPF_REG __MAX_BPF_REG +struct sk_buff; +struct sock; +struct seccomp_data; +struct bpf_prog_aux; /* ArgX, context and stack frame pointer register positions. 
Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -161,6 +127,30 @@ enum { .off = 0, \ .imm = IMM }) +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +#define BPF_PSEUDO_MAP_FD 1 + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ @@ -299,14 +289,6 @@ enum { #define SK_RUN_FILTER(filter, ctx) \ (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) -struct bpf_insn { - __u8 code; /* opcode */ - __u8 dst_reg:4; /* dest register */ - __u8 src_reg:4; /* source register */ - __s16 off; /* signed offset */ - __s32 imm; /* signed immediate constant */ -}; - #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -320,20 +302,23 @@ struct sock_fprog_kern { struct sock_filter *filter; }; -struct sk_buff; -struct sock; -struct seccomp_data; +struct bpf_binary_header { + unsigned int pages; + u8 image[]; +}; struct bpf_prog { - u32 jited:1, /* Is our filter JIT'ed? */ - len:31; /* Number of filter blocks */ + u16 pages; /* Number of allocated pages */ + bool jited; /* Is our filter JIT'ed? */ + u32 len; /* Number of filter blocks */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ + struct bpf_prog_aux *aux; /* Auxiliary fields */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); + /* Instructions for interpreter */ union { struct sock_filter insns[0]; struct bpf_insn insnsi[0]; - struct work_struct work; }; }; @@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) +#ifdef CONFIG_DEBUG_SET_MODULE_RONX +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ + set_memory_ro((unsigned long)fp, fp->pages); +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ + set_memory_rw((unsigned long)fp, fp->pages); +} +#else +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ +} +#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + int sk_filter(struct sock *sk, struct sk_buff *skb); void bpf_prog_select_runtime(struct bpf_prog *fp); @@ -361,6 +366,17 @@ void bpf_prog_free(struct bpf_prog *fp); int bpf_convert_filter(struct sock_filter *prog, int len, struct bpf_insn *new_prog, int *new_len); +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); +void __bpf_prog_free(struct bpf_prog *fp); + +static inline void bpf_prog_unlock_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_ro(fp); + __bpf_prog_free(fp); +} + int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); @@ -377,6 +393,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 
__bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct bpf_prog *fp); +#ifdef CONFIG_BPF_JIT +typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +void bpf_jit_binary_free(struct bpf_binary_header *hdr); + +void bpf_jit_compile(struct bpf_prog *fp); +void bpf_jit_free(struct bpf_prog *fp); + +static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", + flen, proglen, pass, image); + if (image) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); +} +#else +static inline void bpf_jit_compile(struct bpf_prog *fp) +{ +} + +static inline void bpf_jit_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_free(fp); +} +#endif /* CONFIG_BPF_JIT */ + #define BPF_ANC BIT(15) static inline u16 bpf_anc_helper(const struct sock_filter *ftest) @@ -424,36 +472,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, return bpf_internal_load_pointer_neg_helper(skb, k, size); } -#ifdef CONFIG_BPF_JIT -#include <stdarg.h> -#include <linux/linkage.h> -#include <linux/printk.h> - -void bpf_jit_compile(struct bpf_prog *fp); -void bpf_jit_free(struct bpf_prog *fp); - -static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, - u32 pass, void *image) -{ - pr_err("flen=%u proglen=%u pass=%u image=%pK\n", - flen, proglen, pass, image); - if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, - 16, 1, image, proglen, false); -} -#else -#include <linux/slab.h> - -static inline void bpf_jit_compile(struct bpf_prog *fp) -{ -} - -static inline void bpf_jit_free(struct bpf_prog *fp) -{ - kfree(fp); -} -#endif /* CONFIG_BPF_JIT */ - static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h index 4ebc49fae391..0d348e011a6e 100644 --- a/include/linux/flex_proportions.h +++ b/include/linux/flex_proportions.h @@ -10,6 +10,7 @@ #include <linux/percpu_counter.h> #include <linux/spinlock.h> #include <linux/seqlock.h> +#include <linux/gfp.h> /* * When maximum proportion of some event type is specified, this is the @@ -32,7 +33,7 @@ struct fprop_global { seqcount_t sequence; }; -int fprop_global_init(struct fprop_global *p); +int fprop_global_init(struct fprop_global *p, gfp_t gfp); void fprop_global_destroy(struct fprop_global *p); bool fprop_new_period(struct fprop_global *p, int periods); @@ -79,7 +80,7 @@ struct fprop_local_percpu { raw_spinlock_t lock; /* Protect period and numerator */ }; -int fprop_local_init_percpu(struct fprop_local_percpu *pl); +int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, diff --git a/include/linux/fs.h b/include/linux/fs.h index 94187721ad41..ab4f1a10da20 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -851,13 +851,7 @@ static inline struct file *get_file(struct file *f) */ #define FILE_LOCK_DEFERRED 1 -/* - * The POSIX file lock owner is determined by - * the "struct files_struct" in the thread group - * (or NULL for no owner - BSD locks). 
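The BPF_LD_IMM64 and BPF_LD_MAP_FD macros introduced in the filter.h hunk above assemble eBPF instructions from initializer lists. A minimal usage sketch (illustrative only, not part of this diff; it assumes the BPF_MOV64_IMM and BPF_EXIT_INSN helpers from the same header, and the map fd value is hypothetical):
/* Sketch: load a reference to a process-local map into r1, then return 0.
 * BPF_LD_MAP_FD() expands to two struct bpf_insn slots (BPF_LD | BPF_DW |
 * BPF_IMM with src_reg = BPF_PSEUDO_MAP_FD), so it counts as two insns.
 */
static struct bpf_insn demo_insns[] = {
	BPF_LD_MAP_FD(BPF_REG_1, 42),	/* 42 is a hypothetical map fd */
	BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
	BPF_EXIT_INSN(),		/* return r0 */
};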
- * - * Lockd stuffs a "host" pointer into this. - */ +/* legacy typedef, should eventually be removed */ typedef void *fl_owner_t; struct file_lock_operations { @@ -868,10 +862,13 @@ struct file_lock_operations { struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long (*lm_owner_key)(struct file_lock *); + void (*lm_get_owner)(struct file_lock *, struct file_lock *); + void (*lm_put_owner)(struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ - int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_break)(struct file_lock *); - int (*lm_change)(struct file_lock **, int); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock **, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); }; struct lock_manager { @@ -966,7 +963,7 @@ void locks_free_lock(struct file_lock *fl); extern void locks_init_lock(struct file_lock *); extern struct file_lock * locks_alloc_lock(void); extern void locks_copy_lock(struct file_lock *, struct file_lock *); -extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); +extern void locks_copy_conflock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); @@ -980,11 +977,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec *time); -extern int generic_setlease(struct file *, long, struct file_lock **); -extern int vfs_setlease(struct file *, long, struct file_lock **); -extern int lease_modify(struct file_lock **, int); -extern int lock_may_read(struct inode *, loff_t start, unsigned long count); -extern int lock_may_write(struct inode *, loff_t start, unsigned long count); +extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); +extern int vfs_setlease(struct file *, long, struct file_lock **, void **); +extern int lease_modify(struct file_lock **, int, struct list_head *); #else /* !CONFIG_FILE_LOCKING */ static inline int fcntl_getlk(struct file *file, unsigned int cmd, struct flock __user *user) @@ -1013,12 +1008,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file, #endif static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { - return 0; + return -EINVAL; } static inline int fcntl_getlease(struct file *filp) { - return 0; + return F_UNLCK; } static inline void locks_init_lock(struct file_lock *fl) @@ -1026,7 +1021,7 @@ static inline void locks_init_lock(struct file_lock *fl) return; } -static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) +static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { return; } @@ -1100,33 +1095,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time) } static inline int generic_setlease(struct file *filp, long arg, - struct file_lock **flp) + struct file_lock **flp, void **priv) { return -EINVAL; } static inline int vfs_setlease(struct file *filp, long arg, - struct file_lock **lease) + struct file_lock **lease, void **priv) { return -EINVAL; } -static inline int lease_modify(struct file_lock 
**before, int arg) +static inline int lease_modify(struct file_lock **before, int arg, + struct list_head *dispose) { return -EINVAL; } - -static inline int lock_may_read(struct inode *inode, loff_t start, - unsigned long len) -{ - return 1; -} - -static inline int lock_may_write(struct inode *inode, loff_t start, - unsigned long len) -{ - return 1; -} #endif /* !CONFIG_FILE_LOCKING */ @@ -1151,8 +1135,8 @@ extern void fasync_free(struct fasync_struct *); /* can be called from interrupts */ extern void kill_fasync(struct fasync_struct **, int, int); -extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); -extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); +extern void f_setown(struct file *filp, unsigned long arg, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); @@ -1506,7 +1490,7 @@ struct file_operations { int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); - int (*setlease)(struct file *, long, struct file_lock **); + int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); int (*show_fdinfo)(struct seq_file *m, struct file *f); @@ -1855,7 +1839,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); -extern long do_mount(const char *, const char *, const char *, unsigned long, void *); +extern long do_mount(const char *, const char __user *, + const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, @@ -1874,7 +1859,7 @@ extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); -static inline struct inode *file_inode(struct file *f) +static inline struct inode *file_inode(const struct file *f) { return f->f_inode; } @@ -2611,6 +2596,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata); extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); +extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index efb05961bdd8..77d783f71527 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -139,7 +139,6 @@ struct fs_platform_info { int rx_ring, tx_ring; /* number of buffers on rx */ __u8 macaddr[ETH_ALEN]; /* mac address */ int rx_copybreak; /* limit we copy small frames */ - int use_napi; /* use NAPI */ int napi_weight; /* NAPI weight */ int use_rmii; /* use RMII mode */ diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index f49ddb1b2273..84d60cb841b1 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ 
-781,13 +781,13 @@ struct fsl_ifc_regs { __be32 amask; u32 res4[0x2]; } amask_cs[FSL_IFC_BANK_COUNT]; - u32 res5[0x17]; + u32 res5[0x18]; struct { - __be32 csor_ext; __be32 csor; + __be32 csor_ext; u32 res6; } csor_cs[FSL_IFC_BANK_COUNT]; - u32 res7[0x19]; + u32 res7[0x18]; struct { __be32 ftim[4]; u32 res8[0x8]; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f0b0edbf55a9..662697babd48 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -56,6 +56,8 @@ struct ftrace_ops; typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs); +ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); + /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. @@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * INITIALIZED - The ftrace_ops has already been initialized (first use time * register_ftrace_function() is called, it will initialized the ops) * DELETED - The ops are being deleted, do not let them be registered again. + * ADDING - The ops is in the process of being added. + * REMOVING - The ops is in the process of being removed. + * MODIFYING - The ops is in the process of changing its filter functions. */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -100,6 +105,9 @@ enum { FTRACE_OPS_FL_STUB = 1 << 6, FTRACE_OPS_FL_INITIALIZED = 1 << 7, FTRACE_OPS_FL_DELETED = 1 << 8, + FTRACE_OPS_FL_ADDING = 1 << 9, + FTRACE_OPS_FL_REMOVING = 1 << 10, + FTRACE_OPS_FL_MODIFYING = 1 << 11, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -132,7 +140,7 @@ struct ftrace_ops { int nr_trampolines; struct ftrace_ops_hash local_hash; struct ftrace_ops_hash *func_hash; - struct ftrace_hash *tramp_hash; + struct ftrace_ops_hash old_hash; unsigned long trampoline; #endif }; diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1c2fdaa2ffc3..1ccaab44abcc 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); +extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data); + extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); @@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid); extern struct gen_pool *dev_get_gen_pool(struct device *dev); +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size); + #ifdef CONFIG_OF extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, const char *propname, int index); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5e7219dc0fae..41b30fd4d041 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -156,7 +156,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 /* Convert GFP flags to their corresponding migrate type */ -static inline int allocflags_to_migratetype(gfp_t gfp_flags) +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index e78a2373e374..249db3057e4d 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -56,6 +56,8 
@@ struct seq_file; * as the chip access may sleep when e.g. reading out the IRQ status * registers. * @exported: flags if the gpiochip is exported for use from sysfs. Private. + * @irq_not_threaded: flag must be set if @can_sleep is set but the + * IRQs don't need to be threaded * * A gpio_chip can help platforms abstract various sources of GPIOs so * they can all be accessed through a common programing interface. @@ -101,11 +103,12 @@ struct gpio_chip { struct gpio_desc *desc; const char *const *names; bool can_sleep; + bool irq_not_threaded; bool exported; #ifdef CONFIG_GPIOLIB_IRQCHIP /* - * With CONFIG_GPIO_IRQCHIP we get an irqchip inside the gpiolib + * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib * to handle IRQs for most practical cases. */ struct irq_chip *irqchip; @@ -141,7 +144,7 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip, /* add/remove chips */ extern int gpiochip_add(struct gpio_chip *chip); -extern int gpiochip_remove(struct gpio_chip *chip); +extern void gpiochip_remove(struct gpio_chip *chip); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); @@ -164,9 +167,10 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, irq_flow_handler_t handler, unsigned int type); -#endif /* CONFIG_GPIO_IRQCHIP */ +#endif /* CONFIG_GPIOLIB_IRQCHIP */ -int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, + const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); #else /* CONFIG_GPIOLIB */ diff --git a/include/linux/hash.h b/include/linux/hash.h index bd1754c7ecef..d0494c399392 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) { u64 hash = val; +#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 + hash = hash * GOLDEN_RATIO_PRIME_64; +#else /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ u64 n = hash; n <<= 18; @@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) hash += n; n <<= 2; hash += n; +#endif /* High bits are more random, so use them. 
*/ return hash >> (64 - bits); diff --git a/include/linux/hid.h b/include/linux/hid.h index f53c4a9cca1d..78ea9bf941cd 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -265,6 +265,7 @@ struct hid_item { #define HID_CONNECT_HIDDEV 0x08 #define HID_CONNECT_HIDDEV_FORCE 0x10 #define HID_CONNECT_FF 0x20 +#define HID_CONNECT_DRIVER 0x40 #define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ HID_CONNECT_HIDDEV|HID_CONNECT_FF) @@ -287,6 +288,7 @@ struct hid_item { #define HID_QUIRK_HIDINPUT_FORCE 0x00000080 #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 +#define HID_QUIRK_ALWAYS_POLL 0x00000400 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 @@ -440,6 +442,7 @@ struct hid_output_fifo { #define HID_CLAIMED_INPUT 1 #define HID_CLAIMED_HIDDEV 2 #define HID_CLAIMED_HIDRAW 4 +#define HID_CLAIMED_DRIVER 8 #define HID_STAT_ADDED 1 #define HID_STAT_PARSED 2 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 63579cb8d3dc..ad9051bab267 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma, ptl); else diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a95efeb53a8b..b556e0ab946f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -577,20 +577,4 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node } #endif /* CONFIG_OF */ -#ifdef CONFIG_ACPI -void acpi_i2c_register_devices(struct i2c_adapter *adap); -#else -static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } -#endif /* CONFIG_ACPI */ - -#ifdef CONFIG_ACPI_I2C_OPREGION -int acpi_i2c_install_space_handler(struct i2c_adapter *adapter); -void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter); -#else -static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) -{ } -static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) -{ return 0; } -#endif /* CONFIG_ACPI_I2C_OPREGION */ - #endif /* _LINUX_I2C_H */ diff --git a/include/linux/i82593.h b/include/linux/i82593.h deleted file mode 100644 index afac5c7a323d..000000000000 --- a/include/linux/i82593.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Definitions for Intel 82593 CSMA/CD Core LAN Controller - * The definitions are taken from the 1992 users manual with Intel - * order number 297125-001. - * - * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp - * - * Copyright 1994, Anders Klemets <klemets@it.kth.se> - * - * HISTORY - * i82593.h,v - * Revision 1.4 2005/11/4 09:15:00 baroniunas - * Modified copyright with permission of author as follows: - * - * "If I82539.H is the only file with my copyright statement - * that is included in the Source Forge project, then you have - * my approval to change the copyright statement to be a GPL - * license, in the way you proposed on October 10." 
- * - * Revision 1.1 1996/07/17 15:23:12 root - * Initial revision - * - * Revision 1.3 1995/04/05 15:13:58 adj - * Initial alpha release - * - * Revision 1.2 1994/06/16 23:57:31 klemets - * Mirrored all the fields in the configuration block. - * - * Revision 1.1 1994/06/02 20:25:34 klemets - * Initial revision - * - * - */ -#ifndef _I82593_H -#define _I82593_H - -/* Intel 82593 CSMA/CD Core LAN Controller */ - -/* Port 0 Command Register definitions */ - -/* Execution operations */ -#define OP0_NOP 0 /* CHNL = 0 */ -#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */ -#define OP0_IA_SETUP 1 -#define OP0_CONFIGURE 2 -#define OP0_MC_SETUP 3 -#define OP0_TRANSMIT 4 -#define OP0_TDR 5 -#define OP0_DUMP 6 -#define OP0_DIAGNOSE 7 -#define OP0_TRANSMIT_NO_CRC 9 -#define OP0_RETRANSMIT 12 -#define OP0_ABORT 13 -/* Reception operations */ -#define OP0_RCV_ENABLE 8 -#define OP0_RCV_DISABLE 10 -#define OP0_STOP_RCV 11 -/* Status pointer control operations */ -#define OP0_FIX_PTR 15 /* CHNL = 1 */ -#define OP0_RLS_PTR 15 /* CHNL = 0 */ -#define OP0_RESET 14 - -#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */ -#define CR0_STATUS_0 0x00 -#define CR0_STATUS_1 0x20 -#define CR0_STATUS_2 0x40 -#define CR0_STATUS_3 0x60 -#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */ - -/* Port 0 Status Register definitions */ - -#define SR0_NO_RESULT 0 /* dummy */ -#define SR0_EVENT_MASK 0x0f -#define SR0_IA_SETUP_DONE 1 -#define SR0_CONFIGURE_DONE 2 -#define SR0_MC_SETUP_DONE 3 -#define SR0_TRANSMIT_DONE 4 -#define SR0_TDR_DONE 5 -#define SR0_DUMP_DONE 6 -#define SR0_DIAGNOSE_PASSED 7 -#define SR0_TRANSMIT_NO_CRC_DONE 9 -#define SR0_RETRANSMIT_DONE 12 -#define SR0_EXECUTION_ABORTED 13 -#define SR0_END_OF_FRAME 8 -#define SR0_RECEPTION_ABORTED 10 -#define SR0_DIAGNOSE_FAILED 15 -#define SR0_STOP_REG_HIT 11 - -#define SR0_CHNL (1 << 4) -#define SR0_EXECUTION (1 << 5) -#define SR0_RECEPTION (1 << 6) -#define SR0_INTERRUPT (1 << 7) -#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION) - -#define SR3_EXEC_STATE_MASK 0x03 -#define SR3_EXEC_IDLE 0 -#define SR3_TX_ABORT_IN_PROGRESS 1 -#define SR3_EXEC_ACTIVE 2 -#define SR3_ABORT_IN_PROGRESS 3 -#define SR3_EXEC_CHNL (1 << 2) -#define SR3_STP_ON_NO_RSRC (1 << 3) -#define SR3_RCVING_NO_RSRC (1 << 4) -#define SR3_RCV_STATE_MASK 0x60 -#define SR3_RCV_IDLE 0x00 -#define SR3_RCV_READY 0x20 -#define SR3_RCV_ACTIVE 0x40 -#define SR3_RCV_STOP_IN_PROG 0x60 -#define SR3_RCV_CHNL (1 << 7) - -/* Port 1 Command Register definitions */ - -#define OP1_NOP 0 -#define OP1_SWIT_TO_PORT_0 1 -#define OP1_INT_DISABLE 2 -#define OP1_INT_ENABLE 3 -#define OP1_SET_TS 5 -#define OP1_RST_TS 7 -#define OP1_POWER_DOWN 8 -#define OP1_RESET_RING_MNGMT 11 -#define OP1_RESET 14 -#define OP1_SEL_RST 15 - -#define CR1_STATUS_4 0x00 -#define CR1_STATUS_5 0x20 -#define CR1_STATUS_6 0x40 -#define CR1_STOP_REG_UPDATE (1 << 7) - -/* Receive frame status bits */ - -#define RX_RCLD (1 << 0) -#define RX_IA_MATCH (1 << 1) -#define RX_NO_AD_MATCH (1 << 2) -#define RX_NO_SFD (1 << 3) -#define RX_SRT_FRM (1 << 7) -#define RX_OVRRUN (1 << 8) -#define RX_ALG_ERR (1 << 10) -#define RX_CRC_ERR (1 << 11) -#define RX_LEN_ERR (1 << 12) -#define RX_RCV_OK (1 << 13) -#define RX_TYP_LEN (1 << 15) - -/* Transmit status bits */ - -#define TX_NCOL_MASK 0x0f -#define TX_FRTL (1 << 4) -#define TX_MAX_COL (1 << 5) -#define TX_HRT_BEAT (1 << 6) -#define TX_DEFER (1 << 7) -#define TX_UND_RUN (1 << 8) -#define TX_LOST_CTS (1 << 9) -#define TX_LOST_CRS (1 << 10) -#define TX_LTCOL (1 << 11) -#define TX_OK (1 << 13) -#define TX_COLL 
(1 << 15) - -struct i82593_conf_block { - u_char fifo_limit : 4, - forgnesi : 1, - fifo_32 : 1, - d6mod : 1, - throttle_enb : 1; - u_char throttle : 6, - cntrxint : 1, - contin : 1; - u_char addr_len : 3, - acloc : 1, - preamb_len : 2, - loopback : 2; - u_char lin_prio : 3, - tbofstop : 1, - exp_prio : 3, - bof_met : 1; - u_char : 4, - ifrm_spc : 4; - u_char : 5, - slottim_low : 3; - u_char slottim_hi : 3, - : 1, - max_retr : 4; - u_char prmisc : 1, - bc_dis : 1, - : 1, - crs_1 : 1, - nocrc_ins : 1, - crc_1632 : 1, - : 1, - crs_cdt : 1; - u_char cs_filter : 3, - crs_src : 1, - cd_filter : 3, - : 1; - u_char : 2, - min_fr_len : 6; - u_char lng_typ : 1, - lng_fld : 1, - rxcrc_xf : 1, - artx : 1, - sarec : 1, - tx_jabber : 1, /* why is this called max_len in the manual? */ - hash_1 : 1, - lbpkpol : 1; - u_char : 6, - fdx : 1, - : 1; - u_char dummy_6 : 6, /* supposed to be ones */ - mult_ia : 1, - dis_bof : 1; - u_char dummy_1 : 1, /* supposed to be one */ - tx_ifs_retrig : 2, - mc_all : 1, - rcv_mon : 2, - frag_acpt : 1, - tstrttrs : 1; - u_char fretx : 1, - runt_eop : 1, - hw_sw_pin : 1, - big_endn : 1, - syncrqs : 1, - sttlen : 1, - tx_eop : 1, - rx_eop : 1; - u_char rbuf_size : 5, - rcvstop : 1, - : 2; -}; - -#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */ - -#endif /* _I82593_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 63ab3873c5ed..b1be39c76931 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -6,6 +6,7 @@ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2005, Devicescape Software, Inc. * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -165,8 +166,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_MESH_ID_LEN 32 +#define IEEE80211_FIRST_TSPEC_TSID 8 #define IEEE80211_NUM_TIDS 16 +/* number of user priorities 802.11 uses */ +#define IEEE80211_NUM_UPS 8 + #define IEEE80211_QOS_CTL_LEN 2 /* 1d tag mask */ #define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 @@ -838,6 +843,16 @@ enum ieee80211_vht_opmode_bits { #define WLAN_SA_QUERY_TR_ID_LEN 2 +/** + * struct ieee80211_tpc_report_ie + * + * This structure refers to "TPC Report element" + */ +struct ieee80211_tpc_report_ie { + u8 tx_power; + u8 link_margin; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +988,13 @@ struct ieee80211_mgmt { u8 action_code; u8 operating_mode; } __packed vht_opmode_notif; + struct { + u8 action_code; + u8 dialog_token; + u8 tpc_elem_id; + u8 tpc_elem_length; + struct ieee80211_tpc_report_ie tpc; + } __packed tpc_report; } u; } __packed action; } u; @@ -1806,7 +1828,8 @@ enum ieee80211_eid { WLAN_EID_DMG_TSPEC = 146, WLAN_EID_DMG_AT = 147, WLAN_EID_DMG_CAP = 148, - /* 149-150 reserved for Cisco */ + /* 149 reserved for Cisco */ + WLAN_EID_CISCO_VENDOR_SPECIFIC = 150, WLAN_EID_DMG_OPERATION = 151, WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, WLAN_EID_DMG_BEAM_REFINEMENT = 153, @@ -1865,6 +1888,7 @@ enum ieee80211_category { WLAN_CATEGORY_DLS = 2, WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_RADIO_MEASUREMENT = 5, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -2378,4 +2402,51 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, #define 
TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) +/** + * ieee80211_action_contains_tpc - checks if the frame contains TPC element + * @skb: the skb containing the frame, length will be checked + * + * This function checks if it's either TPC report action frame or Link + * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 + * and 8.5.7.5 accordingly. + */ +static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + + if (skb->len < IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.tpc_report)) + return false; + + /* + * TPC report - check that: + * category = 0 (Spectrum Management) or 5 (Radio Measurement) + * spectrum management action = 3 (TPC/Link Measurement report) + * TPC report EID = 35 + * TPC report element length = 2 + * + * The spectrum management's tpc_report struct is used here both for + * parsing tpc_report and radio measurement's link measurement report + * frame, since the relevant part is identical in both frames. + */ + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT && + mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT) + return false; + + /* both spectrum mgmt and link measurement have same action code */ + if (mgmt->u.action.u.tpc_report.action_code != + WLAN_ACTION_SPCT_TPC_RPRT) + return false; + + if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT || + mgmt->u.action.u.tpc_report.tpc_elem_length != + sizeof(struct ieee80211_tpc_report_ie)) + return false; + + return true; +} + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 6b2c7cf352a5..6f6929ea8a0c 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -60,6 +60,7 @@ struct macvlan_dev { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f47550d75f85..2c677afeea47 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -39,6 +39,7 @@ static inline struct igmpv3_query * extern int sysctl_igmp_max_memberships; extern int sysctl_igmp_max_msf; +extern int sysctl_igmp_qrv; struct ip_sf_socklist { unsigned int sl_max; diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 4b79ffe7b188..fa76c79a52a1 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig) put_device(&trig->dev); } -static inline void iio_trigger_get(struct iio_trigger *trig) +static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) { get_device(&trig->dev); __module_get(trig->ops->owner); + + return trig; } /** diff --git a/include/linux/ima.h b/include/linux/ima.h index 7cf5e9b32550..120ccc53fcb7 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -15,7 +15,7 @@ struct linux_binprm; #ifdef CONFIG_IMA extern int ima_bprm_check(struct linux_binprm *bprm); -extern int ima_file_check(struct file *file, int mask); +extern int ima_file_check(struct file *file, int mask, int opened); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_module_check(struct file *file); @@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct 
linux_binprm *bprm) return 0; } -static inline int ima_file_check(struct file *file, int mask) +static inline int ima_file_check(struct file *file, int mask, int opened) { return 0; } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2bb4c4f3531a..77fc43f8fb72 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -111,12 +111,21 @@ extern struct group_info init_groups; #ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_PREEMPT(tsk) \ .rcu_read_lock_nesting = 0, \ - .rcu_read_unlock_special = 0, \ + .rcu_read_unlock_special.s = 0, \ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ INIT_TASK_RCU_TREE_PREEMPT() #else #define INIT_TASK_RCU_PREEMPT(tsk) #endif +#ifdef CONFIG_TASKS_RCU +#define INIT_TASK_RCU_TASKS(tsk) \ + .rcu_tasks_holdout = false, \ + .rcu_tasks_holdout_list = \ + LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \ + .rcu_tasks_idle_cpu = -1, +#else +#define INIT_TASK_RCU_TASKS(tsk) +#endif extern struct cred init_cred; @@ -224,6 +233,7 @@ extern struct task_group root_task_group; INIT_FTRACE_GRAPH \ INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ + INIT_TASK_RCU_TASKS(tsk) \ INIT_CPUSET_SEQ(tsk) \ INIT_RT_MUTEXES(tsk) \ INIT_VTIME(tsk) \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 698ad053d064..69517a24bc50 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id); /* The following three functions are for the core kernel use only. */ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); -#ifdef CONFIG_PM_SLEEP -extern int check_wakeup_irqs(void); -#else -static inline int check_wakeup_irqs(void) { return 0; } -#endif /** * struct irq_affinity_notify - context for notification of IRQ affinity changes diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 20f9a527922a..7b02bcc85b9e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -80,6 +80,7 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, + DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_MAX, }; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 142ec544167c..2c5250222278 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s, /* Wrappers for managed devices */ struct device; + +extern int devm_request_resource(struct device *dev, struct resource *root, + struct resource *new); +extern void devm_release_resource(struct device *dev, struct resource *new); + #define devm_request_region(dev,start,n,name) \ __devm_request_region(dev, &ioport_resource, (start), (n), (name)) #define devm_request_mem_region(dev,start,n,name) \ diff --git a/include/linux/ipack.h b/include/linux/ipack.h index 1888e06ddf64..8bddc3fbdddf 100644 --- a/include/linux/ipack.h +++ b/include/linux/ipack.h @@ -172,6 +172,7 @@ struct ipack_bus_ops { * @ops: bus operations for the mezzanine drivers */ struct ipack_bus_device { + struct module *owner; struct device *parent; int slots; int bus_nr; @@ -189,7 +190,8 @@ struct ipack_bus_device { * available bus device in ipack. 
*/ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, - const struct ipack_bus_ops *ops); + const struct ipack_bus_ops *ops, + struct module *owner); /** * ipack_bus_unregister -- unregister an ipack bus @@ -265,3 +267,23 @@ void ipack_put_device(struct ipack_device *dev); .format = (_format), \ .vendor = (vend), \ .device = (dev) + +/** + * ipack_get_carrier - increase the ref. counter of + * the carrier module + * @dev: mezzanine device which wants to get the carrier + */ +static inline int ipack_get_carrier(struct ipack_device *dev) +{ + return try_module_get(dev->bus->owner); +} + +/** + * ipack_put_carrier - decrease the ref. counter of + * the carrier module + * @dev: mezzanine device which wants to release the carrier + */ +static inline void ipack_put_carrier(struct ipack_device *dev) +{ + module_put(dev->bus->owner); +} diff --git a/include/linux/irq.h b/include/linux/irq.h index 62af59242ddc..03f48d936f66 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -173,6 +173,7 @@ struct irq_data { * IRQD_IRQ_DISABLED - Disabled state of the interrupt * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt + * IRQD_WAKEUP_ARMED - Wakeup mode armed */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -186,6 +187,7 @@ enum { IRQD_IRQ_DISABLED = (1 << 16), IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_INPROGRESS = (1 << 18), + IRQD_WAKEUP_ARMED = (1 << 19), }; static inline bool irqd_is_setaffinity_pending(struct irq_data *d) @@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d) return d->state_use_accessors & IRQD_IRQ_INPROGRESS; } +static inline bool irqd_is_wakeup_armed(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_WAKEUP_ARMED; +} + + /* * Functions for chained handlers which can be enabled/disabled by the * standard disable_irq/enable_irq calls.
Must be called with diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index bf9422c3aefe..bf3fe719c7ce 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu); #endif void irq_work_run(void); +void irq_work_tick(void); void irq_work_sync(struct irq_work *work); #ifdef CONFIG_IRQ_WORK +#include <asm/irq_work.h> + bool irq_work_needs_cpu(void); #else static inline bool irq_work_needs_cpu(void) { return false; } diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 45e2d8c15bd2..13eed92c7d24 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -21,7 +21,11 @@ #define GIC_CPU_ACTIVEPRIO 0xd0 #define GIC_CPU_IDENT 0xfc +#define GICC_ENABLE 0x1 +#define GICC_INT_PRI_THRESHOLD 0xf0 #define GICC_IAR_INT_ID_MASK 0x3ff +#define GICC_INT_SPURIOUS 1023 +#define GICC_DIS_BYPASS_MASK 0x1e0 #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 @@ -39,6 +43,18 @@ #define GIC_DIST_SGI_PENDING_CLEAR 0xf10 #define GIC_DIST_SGI_PENDING_SET 0xf20 +#define GICD_ENABLE 0x1 +#define GICD_DISABLE 0x0 +#define GICD_INT_ACTLOW_LVLTRIG 0x0 +#define GICD_INT_EN_CLR_X32 0xffffffff +#define GICD_INT_EN_SET_SGI 0x0000ffff +#define GICD_INT_EN_CLR_PPI 0xffff0000 +#define GICD_INT_DEF_PRI 0xa0 +#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ + (GICD_INT_DEF_PRI << 16) |\ + (GICD_INT_DEF_PRI << 8) |\ + GICD_INT_DEF_PRI) + #define GICH_HCR 0x0 #define GICH_VTR 0x4 #define GICH_VMCR 0x8 diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h new file mode 100644 index 000000000000..e06b370cfc0d --- /dev/null +++ b/include/linux/irqchip/irq-omap-intc.h @@ -0,0 +1,32 @@ +/** + * irq-omap-intc.h - INTC Idle Functions + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Felipe Balbi <balbi@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
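For reference, the GICD_INT_DEF_PRI_X4 helper added in the arm-gic.h hunk above simply replicates the default priority into each byte lane; a worked expansion (not part of the diff):
/*
 * GICD_INT_DEF_PRI_X4 = (0xa0 << 24) | (0xa0 << 16) | (0xa0 << 8) | 0xa0
 *                     = 0xa0a0a0a0
 *
 * i.e. the default priority 0xa0 written into all four byte lanes of one
 * 32-bit distributor priority register word.
 */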
+ */ + +#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H +#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H + +void omap2_init_irq(void); +void omap3_init_irq(void); +void ti81xx_init_irq(void); + +int omap_irq_pending(void); +void omap_intc_save_context(void); +void omap_intc_restore_context(void); +void omap3_intc_suspend(void); +void omap3_intc_prepare_idle(void); +void omap3_intc_resume_idle(void); + +#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 472c021a2d4f..faf433af425e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -12,6 +12,8 @@ struct irq_affinity_notify; struct proc_dir_entry; struct module; struct irq_desc; +struct irq_domain; +struct pt_regs; /** * struct irq_desc - interrupt descriptor @@ -36,6 +38,11 @@ struct irq_desc; * @threads_oneshot: bitfield to handle shared oneshot threads * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @nr_actions: number of installed actions on this descriptor + * @no_suspend_depth: number of irqactions on an irq descriptor with + * IRQF_NO_SUSPEND set + * @force_resume_depth: number of irqactions on an irq descriptor with + * IRQF_FORCE_RESUME set * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -68,6 +75,11 @@ struct irq_desc { unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PM_SLEEP + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int force_resume_depth; +#endif #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif @@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de int generic_handle_irq(unsigned int irq); +#ifdef CONFIG_HANDLE_DOMAIN_IRQ +/* + * Convert a HW interrupt number to a logical one using an IRQ domain, + * and handle the resulting interrupt number. Return -EINVAL if + * conversion failed. Providing a NULL domain indicates that the + * conversion has already been done. + */ +int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, + bool lookup, struct pt_regs *regs); + +static inline int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) +{ + return __handle_domain_irq(domain, hwirq, true, regs); +} +#endif + /* Test to see if a driver has successfully requested an irq */ static inline int irq_has_action(unsigned int irq) { diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 1f44466c1e9d..c367cbdf73ab 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -258,23 +258,11 @@ extern unsigned long preset_lpj; #define SEC_JIFFIE_SC (32 - SHIFT_HZ) #endif #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) -#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ TICK_NSEC -1) / (u64)TICK_NSEC)) #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ TICK_NSEC -1) / (u64)TICK_NSEC)) -#define USEC_CONVERSION \ - ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ - TICK_NSEC -1) / (u64)TICK_NSEC)) -/* - * USEC_ROUND is used in the timeval to jiffie conversion. See there - * for more details. It is the scaled resolution rounding value. Note - * that it is a 64-bit value. Since, when it is applied, we are already - * in jiffies (albit scaled), it is nothing but the bits we will shift - * off.
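A minimal sketch of how architecture entry code might use the handle_domain_irq() helper added in the irqdesc.h hunk above (illustrative only; demo_root_domain and demo_read_hwirq() are hypothetical stand-ins, not part of this diff):
static struct irq_domain *demo_root_domain;	/* hypothetical, set up by the irqchip driver */

static void demo_handle_arch_irq(struct pt_regs *regs)
{
	unsigned int hwirq = demo_read_hwirq();	/* hypothetical: read the pending hwirq */

	/* Maps hwirq through the domain and runs the handler; -EINVAL means no mapping. */
	if (handle_domain_irq(demo_root_domain, hwirq, regs))
		pr_warn("no mapping for hwirq %u\n", hwirq);
}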
- */ -#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) /* * The maximum jiffie value is (MAX_INT >> 1). Here we translate that * into seconds. The 64-bit case will overflow if we are not careful, diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 784304b222b3..98f923b6a0ea 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -8,28 +8,28 @@ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> * * Jump labels provide an interface to generate dynamic branches using - * self-modifying code. Assuming toolchain and architecture support the result - * of a "if (static_key_false(&key))" statement is a unconditional branch (which + * self-modifying code. Assuming toolchain and architecture support, the result + * of a "if (static_key_false(&key))" statement is an unconditional branch (which * defaults to false - and the true block is placed out of line). * * However at runtime we can change the branch target using * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key - * object and for as long as there are references all branches referring to + * object, and for as long as there are references all branches referring to * that particular key will point to the (out of line) true block. * - * Since this relies on modifying code the static_key_slow_{inc,dec}() functions + * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions * must be considered absolute slow paths (machine wide synchronization etc.). - * OTOH, since the affected branches are unconditional their runtime overhead + * OTOH, since the affected branches are unconditional, their runtime overhead * will be absolutely minimal, esp. in the default (off) case where the total * effect is a single NOP of appropriate size. The on case will patch in a jump * to the out-of-line block. * - * When the control is directly exposed to userspace it is prudent to delay the + * When the control is directly exposed to userspace, it is prudent to delay the * decrement to avoid high frequency code modifications which can (and do) * cause significant performance degradation. Struct static_key_deferred and * static_key_slow_dec_deferred() provide for this. * - * Lacking toolchain and or architecture support, it falls back to a simple + * Lacking toolchain and or architecture support, jump labels fall back to a simple * conditional branch. * * struct static_key my_key = STATIC_KEY_INIT_TRUE; @@ -43,8 +43,7 @@ * * Not initializing the key (static data is initialized to 0s anyway) is the * same as using STATIC_KEY_INIT_FALSE. - * -*/ + */ #include <linux/types.h> #include <linux/compiler.h> diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 95624bed87ef..35c8ffb0136f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -496,6 +496,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte) extern int hex_to_bin(char ch); extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); +extern char *bin2hex(char *dst, const void *src, size_t count); bool mac_pton(const char *s, u8 *mac); @@ -715,23 +716,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) -#define min3(x, y, z) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - typeof(z) _min3 = (z); \ - (void) (&_min1 == &_min2); \ - (void) (&_min1 == &_min3); \ - _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ - (_min2 < _min3 ? 
_min2 : _min3); }) - -#define max3(x, y, z) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - typeof(z) _max3 = (z); \ - (void) (&_max1 == &_max2); \ - (void) (&_max1 == &_max3); \ - _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ - (_max2 > _max3 ? _max2 : _max3); }) +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero @@ -746,20 +732,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } /** * clamp - return a value clamped to a given range with strict typechecking * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: lowest allowable value + * @hi: highest allowable value * - * This macro does strict typechecking of min/max to make sure they are of the + * This macro does strict typechecking of lo/hi to make sure they are of the * same type as val. See the unnecessary pointer comparisons. */ -#define clamp(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(min) __min = (min); \ - typeof(max) __max = (max); \ - (void) (&__val == &__min); \ - (void) (&__val == &__max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) /* * ..and if you can't take the strict @@ -781,36 +760,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * clamp_t - return a value clamped to a given range using a given type * @type: the type of variable to use * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of type * 'type' to make all the comparisons. */ -#define clamp_t(type, val, min, max) ({ \ - type __val = (val); \ - type __min = (min); \ - type __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) /** * clamp_val - return a value clamped to a given range using val's type * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of whatever * type the input argument 'val' is. This is useful when val is an unsigned * type and min and max are literals that will otherwise be assigned a signed * integer type. */ -#define clamp_val(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(val) __min = (min); \ - typeof(val) __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /* diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 44792ee649de..ff9f1d394235 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key, const char *op, void *aux); /* + * Preparsed matching criterion. + */ +struct key_match_data { + /* Comparison function, defaults to exact description match, but can be + * overridden by type->match_preparse(). Should return true if a match + * is found and false if not. 
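The kernel.h hunk above rewrites min3/max3 and the clamp family in terms of the existing min/max helpers; a small usage sketch of what the new clamp() reduces to (illustrative only, not part of the diff):
/* clamp(val, lo, hi) now expands to min((typeof(val))max(val, lo), hi),
 * so strict typechecking still requires val, lo and hi to share a type.
 */
static int demo_clamp_percent(int raw)
{
	return clamp(raw, 0, 100);	/* below 0 -> 0, above 100 -> 100 */
}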
+ */ + bool (*cmp)(const struct key *key, + const struct key_match_data *match_data); + + const void *raw_data; /* Raw match data */ + void *preparsed; /* For ->match_preparse() to stash stuff */ + unsigned lookup_type; /* Type of lookup for this search. */ +#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ +#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ +}; + +/* * kernel managed key type definition */ struct key_type { @@ -65,11 +83,6 @@ struct key_type { */ size_t def_datalen; - /* Default key search algorithm. */ - unsigned def_lookup_type; -#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ -#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ - /* vet a description */ int (*vet_description)(const char *description); @@ -96,8 +109,15 @@ struct key_type { */ int (*update)(struct key *key, struct key_preparsed_payload *prep); - /* match a key against a description */ - int (*match)(const struct key *key, const void *desc); + /* Preparse the data supplied to ->match() (optional). The + * data to be preparsed can be found in match_data->raw_data. + * The lookup type can also be set by this function. + */ + int (*match_preparse)(struct key_match_data *match_data); + + /* Free preparsed match data (optional). This should be supplied if + * ->match_preparse() is supplied. */ + void (*match_free)(struct key_match_data *match_data); /* clear some of the data from a key on revokation (optional) * - the key's semaphore will be write-locked by the caller diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 554fde3a3927..473b43678ad1 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -722,7 +722,7 @@ __kfifo_uint_must_check_helper( \ /** * kfifo_dma_out_finish - finish a DMA OUT operation * @fifo: address of the fifo to be used - * @len: number of bytes transferrd + * @len: number of bytes transferred * * This macro finish a DMA OUT operation. The out counter will be updated by * the len parameter. No error checking will be done. diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a4c33b34fe3f..28be31f49250 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -136,12 +136,11 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 #define KVM_REQ_ENABLE_IBS 23 #define KVM_REQ_DISABLE_IBS 24 +#define KVM_REQ_APIC_PAGE_RELOAD 25 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 -struct kvm; -struct kvm_vcpu; extern struct kmem_cache *kvm_vcpu_cache; extern spinlock_t kvm_lock; @@ -200,6 +199,17 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif +/* + * Carry out a gup that requires IO. Allow the mm to relinquish the mmap + * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL + * controls whether we retry the gup one more time to completion in that case. + * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
+ */ +int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, bool write_fault, + struct page **pagep); + enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, @@ -325,8 +335,6 @@ struct kvm_kernel_irq_routing_entry { struct hlist_node link; }; -struct kvm_irq_routing_table; - #ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif @@ -528,6 +536,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); +unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, + bool *writable); void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); @@ -579,6 +589,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); void kvm_make_mclock_inprogress_request(struct kvm *kvm); void kvm_make_scan_ioapic_request(struct kvm *kvm); +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -624,6 +635,8 @@ void kvm_arch_exit(void); int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); +void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); + void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); @@ -632,8 +645,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); -int kvm_arch_hardware_enable(void *garbage); -void kvm_arch_hardware_disable(void *garbage); +int kvm_arch_hardware_enable(void); +void kvm_arch_hardware_disable(void); int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); void kvm_arch_check_processor_compat(void *rtn); @@ -1034,8 +1047,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) extern bool kvm_rebooting; -struct kvm_device_ops; - struct kvm_device { struct kvm_device_ops *ops; struct kvm *kvm; @@ -1068,12 +1079,10 @@ struct kvm_device_ops { void kvm_device_get(struct kvm_device *dev); void kvm_device_put(struct kvm_device *dev); struct kvm_device *kvm_device_from_filp(struct file *filp); +int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); extern struct kvm_device_ops kvm_mpic_ops; extern struct kvm_device_ops kvm_xics_ops; -extern struct kvm_device_ops kvm_vfio_ops; -extern struct kvm_device_ops kvm_arm_vgic_v2_ops; -extern struct kvm_device_ops kvm_flic_ops; #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index b0bcce0ddc95..b606bb689a3e 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -17,6 +17,20 @@ #ifndef __KVM_TYPES_H__ #define __KVM_TYPES_H__ +struct kvm; +struct kvm_async_pf; +struct kvm_device_ops; +struct kvm_interrupt; +struct kvm_irq_routing_table; +struct kvm_memory_slot; +struct kvm_one_reg; +struct kvm_run; +struct kvm_userspace_memory_region; +struct kvm_vcpu; +struct kvm_vcpu_init; + +enum kvm_mr_change; + #include <asm/types.h> /* diff --git a/include/linux/libata.h b/include/linux/libata.h index 92abb497ab14..bd5fefeaf548 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1404,14 +1404,14 @@ 
static inline int sata_srst_pmp(struct ata_link *link) * printk helpers */ __printf(3, 4) -int ata_port_printk(const struct ata_port *ap, const char *level, - const char *fmt, ...); +void ata_port_printk(const struct ata_port *ap, const char *level, + const char *fmt, ...); __printf(3, 4) -int ata_link_printk(const struct ata_link *link, const char *level, - const char *fmt, ...); +void ata_link_printk(const struct ata_link *link, const char *level, + const char *fmt, ...); __printf(3, 4) -int ata_dev_printk(const struct ata_device *dev, const char *level, - const char *fmt, ...); +void ata_dev_printk(const struct ata_device *dev, const char *level, + const char *fmt, ...); #define ata_port_err(ap, fmt, ...) \ ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 219d79627c05..ff82a32871b5 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -178,7 +178,6 @@ struct nlm_block { unsigned char b_granted; /* VFS granted lock */ struct nlm_file * b_file; /* file in question */ struct cache_req * b_cache_req; /* deferred request handling */ - struct file_lock * b_fl; /* set for GETLK */ struct cache_deferred_req * b_deferred_req; unsigned int b_flags; /* block flags */ #define B_QUEUED 1 /* lock queued */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 008388f920d7..74ab23176e9b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -4,7 +4,7 @@ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * - * see Documentation/lockdep-design.txt for more details. + * see Documentation/locking/lockdep-design.txt for more details. */ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H @@ -362,6 +362,10 @@ extern void lockdep_trace_alloc(gfp_t mask); WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) +#define lockdep_assert_held_once(l) do { \ + WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ + } while (0) + #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) #else /* !CONFIG_LOCKDEP */ @@ -412,6 +416,7 @@ struct lock_class_key { }; #define lockdep_depth(tsk) (0) #define lockdep_assert_held(l) do { (void)(l); } while (0) +#define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_recursing(tsk) (0) @@ -505,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr) #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) #define lock_map_release(l) lock_release(l, 1, _THIS_IP_) #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index d14af7b722ef..164aad1f9f12 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -3,6 +3,7 @@ #include <linux/device.h> #include <linux/uuid.h> +#include <linux/mod_devicetable.h> struct mei_cl_device; diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e0752d204d9e..19df5d857411 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,11 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); -void 
memcg_free_cache_params(struct kmem_cache *s); - -int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); struct kmem_cache * @@ -574,16 +569,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, - struct kmem_cache *s, struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void memcg_free_cache_params(struct kmem_cache *s) -{ -} - static inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d9524c49d767..8f1a41951df9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); typedef void (*online_page_callback_t)(struct page *page); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index f230a978e6ba..3d385c81c153 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx); -struct mempolicy *get_vma_policy(struct task_struct *tsk, - struct vm_area_struct *vma, unsigned long addr); -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); +bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index c466ff3e16b8..d0e578fd7053 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -251,6 +251,15 @@ enum max77693_haptic_reg { MAX77693_HAPTIC_REG_END, }; +/* max77693-pmic LSCNFG configuraton register */ +#define MAX77693_PMIC_LOW_SYS_MASK 0x80 +#define MAX77693_PMIC_LOW_SYS_SHIFT 7 + +/* max77693-haptic configuration register */ +#define MAX77693_CONFIG2_MODE 7 +#define MAX77693_CONFIG2_MEN 6 +#define MAX77693_CONFIG2_HTYP 5 + enum max77693_irq_source { LED_INT = 0, TOPSYS_INT, diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index b5f73de81aad..1825edacbda7 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -14,6 +14,27 @@ #ifndef __LINUX_MFD_SEC_CORE_H #define __LINUX_MFD_SEC_CORE_H +/* Macros to represent minimum voltages for LDO/BUCK */ +#define MIN_3000_MV 3000000 +#define MIN_2500_MV 2500000 +#define MIN_2000_MV 2000000 +#define MIN_1800_MV 1800000 +#define MIN_1500_MV 1500000 +#define MIN_1400_MV 1400000 +#define MIN_1000_MV 1000000 + +#define MIN_900_MV 900000 +#define MIN_850_MV 850000 +#define MIN_800_MV 800000 +#define MIN_750_MV 750000 +#define MIN_600_MV 600000 + +/* Macros to represent steps for LDO/BUCK */ +#define STEP_50_MV 50000 +#define STEP_25_MV 25000 +#define STEP_12_5_MV 12500 +#define STEP_6_25_MV 6250 + enum sec_device_type { S5M8751X, S5M8763X, diff 
--git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h index fbc63bc0d6a2..2766108bca2f 100644 --- a/include/linux/mfd/samsung/s2mpa01.h +++ b/include/linux/mfd/samsung/s2mpa01.h @@ -155,18 +155,6 @@ enum s2mpa01_regulators { S2MPA01_REGULATOR_MAX, }; -#define S2MPA01_BUCK_MIN1 600000 -#define S2MPA01_BUCK_MIN2 800000 -#define S2MPA01_BUCK_MIN3 1000000 -#define S2MPA01_BUCK_MIN4 1500000 -#define S2MPA01_LDO_MIN 800000 - -#define S2MPA01_BUCK_STEP1 6250 -#define S2MPA01_BUCK_STEP2 12500 - -#define S2MPA01_LDO_STEP1 50000 -#define S2MPA01_LDO_STEP2 25000 - #define S2MPA01_LDO_VSEL_MASK 0x3F #define S2MPA01_BUCK_VSEL_MASK 0xFF #define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT) diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index b3ddf98dec37..7981a9d77d3f 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -171,15 +171,6 @@ enum s2mps11_regulators { S2MPS11_REGULATOR_MAX, }; -#define S2MPS11_BUCK_MIN1 600000 -#define S2MPS11_BUCK_MIN2 750000 -#define S2MPS11_BUCK_MIN3 3000000 -#define S2MPS11_LDO_MIN 800000 -#define S2MPS11_BUCK_STEP1 6250 -#define S2MPS11_BUCK_STEP2 12500 -#define S2MPS11_BUCK_STEP3 25000 -#define S2MPS11_LDO_STEP1 50000 -#define S2MPS11_LDO_STEP2 25000 #define S2MPS11_LDO_VSEL_MASK 0x3F #define S2MPS11_BUCK_VSEL_MASK 0xFF #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h index 900cd7a04314..c92f4782afb5 100644 --- a/include/linux/mfd/samsung/s2mps14.h +++ b/include/linux/mfd/samsung/s2mps14.h @@ -123,10 +123,6 @@ enum s2mps14_regulators { }; /* Regulator constraints for BUCKx */ -#define S2MPS14_BUCK1235_MIN_600MV 600000 -#define S2MPS14_BUCK4_MIN_1400MV 1400000 -#define S2MPS14_BUCK1235_STEP_6_25MV 6250 -#define S2MPS14_BUCK4_STEP_12_5MV 12500 #define S2MPS14_BUCK1235_START_SEL 0x20 #define S2MPS14_BUCK4_START_SEL 0x40 /* @@ -136,12 +132,6 @@ enum s2mps14_regulators { */ #define S2MPS14_BUCK_RAMP_DELAY 12500 -/* Regulator constraints for different types of LDOx */ -#define S2MPS14_LDO_MIN_800MV 800000 -#define S2MPS14_LDO_MIN_1800MV 1800000 -#define S2MPS14_LDO_STEP_12_5MV 12500 -#define S2MPS14_LDO_STEP_25MV 25000 - #define S2MPS14_LDO_VSEL_MASK 0x3F #define S2MPS14_BUCK_VSEL_MASK 0xFF #define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT) diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 8f6f2e91e7ae..57388171610d 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -5,6 +5,7 @@ #include <linux/fb.h> #include <linux/io.h> #include <linux/jiffies.h> +#include <linux/mmc/card.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -83,6 +84,27 @@ */ #define TMIO_MMC_HAVE_HIGH_REG (1 << 6) +/* + * Some controllers have CMD12 automatically + * issue/non-issue register + */ +#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7) + +/* + * Some controllers needs to set 1 on SDIO status reserved bits + */ +#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8) + +/* + * Some controllers have DMA enable/disable register + */ +#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9) + +/* + * Some controllers allows to set SDx actual clock + */ +#define TMIO_MMC_CLK_ACTUAL (1 << 10) + int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); @@ -96,6 +118,7 @@ struct tmio_mmc_dma { int 
slave_id_tx; int slave_id_rx; int alignment_shift; + dma_addr_t dma_rx_offset; bool (*filter)(struct dma_chan *chan, void *arg); }; @@ -120,6 +143,8 @@ struct tmio_mmc_data { /* clock management callbacks */ int (*clk_enable)(struct platform_device *pdev, unsigned int *f); void (*clk_disable)(struct platform_device *pdev); + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); }; /* diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 2e5b194b9b19..53d33dee70e1 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,7 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 +#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2901c414664..01aad3ed89ec 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private); * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; * - zero on page migration success; - * - * The balloon page migration introduces this special case where a 'distinct' - * return code is used to flag a successful page migration to unmap_and_move(). - * This approach is necessary because page migration can race against balloon - * deflation procedure, and for such case we could introduce a nasty page leak - * if a successfully migrated balloon page gets released concurrently with - * migration's unmap_and_move() wrap-up steps. */ #define MIGRATEPAGE_SUCCESS 0 -#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page - * sucessful migration case. 
- */ + enum migrate_reason { MR_COMPACTION, MR_MEMORY_FAILURE, @@ -82,9 +73,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 071f6b234604..37e4404d0227 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -38,6 +38,7 @@ #include <linux/completion.h> #include <linux/radix-tree.h> #include <linux/cpu_rmap.h> +#include <linux/crash_dump.h> #include <linux/atomic.h> @@ -184,19 +185,24 @@ enum { MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, + MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 }; enum { MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, - MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 + MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1, + MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2, + MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3 }; enum { - MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 + MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 }; enum { - MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 + MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 }; @@ -209,6 +215,7 @@ enum { MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, + MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, }; enum mlx4_event { @@ -576,7 +583,7 @@ struct mlx4_uar { }; struct mlx4_bf { - unsigned long offset; + unsigned int offset; int buf_size; struct mlx4_uar *uar; void __iomem *reg; @@ -700,6 +707,7 @@ struct mlx4_dev { u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; struct mlx4_vf_dev *dev_vfs; + int nvfs[MLX4_MAX_PORTS + 1]; }; struct mlx4_eqe { @@ -1196,6 +1204,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id); + void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val); @@ -1275,7 +1286,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { - return reset_devices; + return is_kdump_kernel(); } #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7040dc98ff8b..5f4e36cf0091 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -56,7 +56,8 @@ enum mlx4_qp_optpar { MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, - MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 + MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20, + MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21, }; enum mlx4_qp_state { @@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, + MLX4_UPDATE_QP_VSD = 1 << 2, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 +}; + +enum mlx4_update_qp_params_flags { + MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, }; struct mlx4_update_qp_params { u8 smac_index; + u32 flags; }; -int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, +int mlx4_update_qp(struct mlx4_dev *dev, u32 
qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params); int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 334947151dfc..1d67fd32e71c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -44,6 +44,50 @@ #error Host endianness not defined #endif +/* helper macros */ +#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) +#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) +#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld))) +#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) +#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) +#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) +#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) +#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) +#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) + +#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) +#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) +#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) +#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) + +/* insert a value to a struct */ +#define MLX5_SET(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + +#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ +__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ +__mlx5_mask(typ, fld)) + +#define MLX5_GET_PR(typ, p, fld) ({ \ + u32 ___t = MLX5_GET(typ, p, fld); \ + pr_debug(#fld " = 0x%x\n", ___t); \ + ___t; \ +}) + +#define MLX5_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ + *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ +} while (0) + +#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -71,6 +115,11 @@ enum { }; enum { + MLX5_MIN_PKEY_TABLE_SIZE = 128, + MLX5_MAX_LOG_PKEY_TABLE = 5, +}; + +enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, MLX5_PERM_REMOTE_READ = 1 << 4, @@ -184,10 +233,10 @@ enum { MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, - MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; @@ -243,10 +292,14 @@ enum { }; enum { - MLX5_CAP_OFF_DCT = 41, MLX5_CAP_OFF_CMDIF_CSUM = 46, }; +enum { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -274,101 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out { u8 vsd_psid[16]; }; -struct mlx5_hca_cap { - u8 rsvd1[16]; - u8 log_max_srq_sz; - u8 log_max_qp_sz; - u8 rsvd2; - u8 log_max_qp; - u8 log_max_strq_sz; - u8 log_max_srqs; - u8 rsvd4[2]; - u8 
rsvd5; - u8 log_max_cq_sz; - u8 rsvd6; - u8 log_max_cq; - u8 log_max_eq_sz; - u8 log_max_mkey; - u8 rsvd7; - u8 log_max_eq; - u8 max_indirection; - u8 log_max_mrw_sz; - u8 log_max_bsf_list_sz; - u8 log_max_klm_list_sz; - u8 rsvd_8_0; - u8 log_max_ra_req_dc; - u8 rsvd_8_1; - u8 log_max_ra_res_dc; - u8 rsvd9; - u8 log_max_ra_req_qp; - u8 rsvd10; - u8 log_max_ra_res_qp; - u8 rsvd11[4]; - __be16 max_qp_count; - __be16 rsvd12; - u8 rsvd13; - u8 local_ca_ack_delay; - u8 rsvd14; - u8 num_ports; - u8 log_max_msg; - u8 rsvd15[3]; - __be16 stat_rate_support; - u8 rsvd16[2]; - __be64 flags; - u8 rsvd17; - u8 uar_sz; - u8 rsvd18; - u8 log_pg_sz; - __be16 bf_log_bf_reg_size; - u8 rsvd19[4]; - __be16 max_desc_sz_sq; - u8 rsvd20[2]; - __be16 max_desc_sz_rq; - u8 rsvd21[2]; - __be16 max_desc_sz_sq_dc; - __be32 max_qp_mcg; - u8 rsvd22[3]; - u8 log_max_mcg; - u8 rsvd23; - u8 log_max_pd; - u8 rsvd24; - u8 log_max_xrcd; - u8 rsvd25[42]; - __be16 log_uar_page_sz; - u8 rsvd26[28]; - u8 log_max_atomic_size_qp; - u8 rsvd27[2]; - u8 log_max_atomic_size_dc; - u8 rsvd28[76]; -}; - - -struct mlx5_cmd_query_hca_cap_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - - -struct mlx5_cmd_query_hca_cap_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; - struct mlx5_hca_cap hca_cap; -}; - - -struct mlx5_cmd_set_hca_cap_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; - struct mlx5_hca_cap hca_cap; -}; - - -struct mlx5_cmd_set_hca_cap_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd0[8]; -}; - - struct mlx5_cmd_init_hca_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[2]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b88e9b46d957..246310dc8bef 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -44,6 +44,7 @@ #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> +#include <linux/mlx5/mlx5_ifc.h> enum { MLX5_BOARD_ID_LEN = 64, @@ -99,81 +100,6 @@ enum { }; enum { - MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, - MLX5_CMD_OP_QUERY_ADAPTER = 0x101, - MLX5_CMD_OP_INIT_HCA = 0x102, - MLX5_CMD_OP_TEARDOWN_HCA = 0x103, - MLX5_CMD_OP_ENABLE_HCA = 0x104, - MLX5_CMD_OP_DISABLE_HCA = 0x105, - MLX5_CMD_OP_QUERY_PAGES = 0x107, - MLX5_CMD_OP_MANAGE_PAGES = 0x108, - MLX5_CMD_OP_SET_HCA_CAP = 0x109, - - MLX5_CMD_OP_CREATE_MKEY = 0x200, - MLX5_CMD_OP_QUERY_MKEY = 0x201, - MLX5_CMD_OP_DESTROY_MKEY = 0x202, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, - - MLX5_CMD_OP_CREATE_EQ = 0x301, - MLX5_CMD_OP_DESTROY_EQ = 0x302, - MLX5_CMD_OP_QUERY_EQ = 0x303, - - MLX5_CMD_OP_CREATE_CQ = 0x400, - MLX5_CMD_OP_DESTROY_CQ = 0x401, - MLX5_CMD_OP_QUERY_CQ = 0x402, - MLX5_CMD_OP_MODIFY_CQ = 0x403, - - MLX5_CMD_OP_CREATE_QP = 0x500, - MLX5_CMD_OP_DESTROY_QP = 0x501, - MLX5_CMD_OP_RST2INIT_QP = 0x502, - MLX5_CMD_OP_INIT2RTR_QP = 0x503, - MLX5_CMD_OP_RTR2RTS_QP = 0x504, - MLX5_CMD_OP_RTS2RTS_QP = 0x505, - MLX5_CMD_OP_SQERR2RTS_QP = 0x506, - MLX5_CMD_OP_2ERR_QP = 0x507, - MLX5_CMD_OP_RTS2SQD_QP = 0x508, - MLX5_CMD_OP_SQD2RTS_QP = 0x509, - MLX5_CMD_OP_2RST_QP = 0x50a, - MLX5_CMD_OP_QUERY_QP = 0x50b, - MLX5_CMD_OP_CONF_SQP = 0x50c, - MLX5_CMD_OP_MAD_IFC = 0x50d, - MLX5_CMD_OP_INIT2INIT_QP = 0x50e, - MLX5_CMD_OP_SUSPEND_QP = 0x50f, - MLX5_CMD_OP_UNSUSPEND_QP = 0x510, - MLX5_CMD_OP_SQD2SQD_QP = 0x511, - MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, - MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, - MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, - - MLX5_CMD_OP_CREATE_PSV = 0x600, - MLX5_CMD_OP_DESTROY_PSV = 0x601, - MLX5_CMD_OP_QUERY_PSV = 0x602, - MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, - 
MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, - - MLX5_CMD_OP_CREATE_SRQ = 0x700, - MLX5_CMD_OP_DESTROY_SRQ = 0x701, - MLX5_CMD_OP_QUERY_SRQ = 0x702, - MLX5_CMD_OP_ARM_RQ = 0x703, - MLX5_CMD_OP_RESIZE_SRQ = 0x704, - - MLX5_CMD_OP_ALLOC_PD = 0x800, - MLX5_CMD_OP_DEALLOC_PD = 0x801, - MLX5_CMD_OP_ALLOC_UAR = 0x802, - MLX5_CMD_OP_DEALLOC_UAR = 0x803, - - MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, - - - MLX5_CMD_OP_ALLOC_XRCD = 0x80e, - MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, - - MLX5_CMD_OP_ACCESS_REG = 0x805, - MLX5_CMD_OP_MAX = 0x810, -}; - -enum { MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -335,23 +261,30 @@ struct mlx5_port_caps { int pkey_table_len; }; -struct mlx5_caps { +struct mlx5_general_caps { u8 log_max_eq; u8 log_max_cq; u8 log_max_qp; u8 log_max_mkey; u8 log_max_pd; u8 log_max_srq; + u8 log_max_strq; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_size; + u8 log_max_klm_list_size; u32 max_cqes; int max_wqes; + u32 max_eqes; + u32 max_indirection; int max_sq_desc_sz; int max_rq_desc_sz; + int max_dc_sq_desc_sz; u64 flags; u16 stat_rate_support; int log_max_msg; int num_ports; - int max_ra_res_qp; - int max_ra_req_qp; + u8 log_max_ra_res_qp; + u8 log_max_ra_req_qp; int max_srq_wqes; int bf_reg_size; int bf_regs_per_page; @@ -363,6 +296,19 @@ struct mlx5_caps { u8 log_max_mcg; u32 max_qp_mcg; int min_page_sz; + int pd_cap; + u32 max_qp_counters; + u32 pkey_table_size; + u8 log_max_ra_req_dc; + u8 log_max_ra_res_dc; + u32 uar_sz; + u8 min_log_pg_sz; + u8 log_max_xrcd; + u16 log_uar_page_sz; +}; + +struct mlx5_caps { + struct mlx5_general_caps gen; }; struct mlx5_cmd_mailbox { @@ -429,6 +375,16 @@ struct mlx5_core_mr { u32 pd; }; +enum mlx5_res_type { + MLX5_RES_QP, +}; + +struct mlx5_core_rsc_common { + enum mlx5_res_type res; + atomic_t refcount; + struct completion free; +}; + struct mlx5_core_srq { u32 srqn; int max; @@ -695,6 +651,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); +int mlx5_cmd_status_to_err_v2(void *ptr); +int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, + u16 opmod); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, @@ -751,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev); void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); -void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); @@ -788,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); static inline u32 mlx5_mkey_to_idx(u32 mkey) { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h new file mode 100644 index 000000000000..5f48b8f592c5 --- /dev/null +++ 
b/include/linux/mlx5/mlx5_ifc.h @@ -0,0 +1,349 @@ +/* + * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_IFC_H +#define MLX5_IFC_H + +enum { + MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, + MLX5_CMD_OP_QUERY_ADAPTER = 0x101, + MLX5_CMD_OP_INIT_HCA = 0x102, + MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_ENABLE_HCA = 0x104, + MLX5_CMD_OP_DISABLE_HCA = 0x105, + MLX5_CMD_OP_QUERY_PAGES = 0x107, + MLX5_CMD_OP_MANAGE_PAGES = 0x108, + MLX5_CMD_OP_SET_HCA_CAP = 0x109, + MLX5_CMD_OP_CREATE_MKEY = 0x200, + MLX5_CMD_OP_QUERY_MKEY = 0x201, + MLX5_CMD_OP_DESTROY_MKEY = 0x202, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, + MLX5_CMD_OP_CREATE_EQ = 0x301, + MLX5_CMD_OP_DESTROY_EQ = 0x302, + MLX5_CMD_OP_QUERY_EQ = 0x303, + MLX5_CMD_OP_GEN_EQE = 0x304, + MLX5_CMD_OP_CREATE_CQ = 0x400, + MLX5_CMD_OP_DESTROY_CQ = 0x401, + MLX5_CMD_OP_QUERY_CQ = 0x402, + MLX5_CMD_OP_MODIFY_CQ = 0x403, + MLX5_CMD_OP_CREATE_QP = 0x500, + MLX5_CMD_OP_DESTROY_QP = 0x501, + MLX5_CMD_OP_RST2INIT_QP = 0x502, + MLX5_CMD_OP_INIT2RTR_QP = 0x503, + MLX5_CMD_OP_RTR2RTS_QP = 0x504, + MLX5_CMD_OP_RTS2RTS_QP = 0x505, + MLX5_CMD_OP_SQERR2RTS_QP = 0x506, + MLX5_CMD_OP_2ERR_QP = 0x507, + MLX5_CMD_OP_2RST_QP = 0x50a, + MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_INIT2INIT_QP = 0x50e, + MLX5_CMD_OP_CREATE_PSV = 0x600, + MLX5_CMD_OP_DESTROY_PSV = 0x601, + MLX5_CMD_OP_CREATE_SRQ = 0x700, + MLX5_CMD_OP_DESTROY_SRQ = 0x701, + MLX5_CMD_OP_QUERY_SRQ = 0x702, + MLX5_CMD_OP_ARM_RQ = 0x703, + MLX5_CMD_OP_RESIZE_SRQ = 0x704, + MLX5_CMD_OP_CREATE_DCT = 0x710, + MLX5_CMD_OP_DESTROY_DCT = 0x711, + MLX5_CMD_OP_DRAIN_DCT = 0x712, + MLX5_CMD_OP_QUERY_DCT = 0x713, + MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, + MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, + MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, + MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, + MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, + MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, + MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, + MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, + MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, + MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, + 
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_ALLOC_PD = 0x800, + MLX5_CMD_OP_DEALLOC_PD = 0x801, + MLX5_CMD_OP_ALLOC_UAR = 0x802, + MLX5_CMD_OP_DEALLOC_UAR = 0x803, + MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, + MLX5_CMD_OP_ACCESS_REG = 0x805, + MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, + MLX5_CMD_OP_MAD_IFC = 0x50d, + MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, + MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, + MLX5_CMD_OP_NOP = 0x80d, + MLX5_CMD_OP_ALLOC_XRCD = 0x80e, + MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, + MLX5_CMD_OP_SET_BURST_SIZE = 0x812, + MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, + MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, + MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, + MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, + MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, + MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, + MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, + MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, + MLX5_CMD_OP_CREATE_TIR = 0x900, + MLX5_CMD_OP_MODIFY_TIR = 0x901, + MLX5_CMD_OP_DESTROY_TIR = 0x902, + MLX5_CMD_OP_QUERY_TIR = 0x903, + MLX5_CMD_OP_CREATE_TIS = 0x912, + MLX5_CMD_OP_MODIFY_TIS = 0x913, + MLX5_CMD_OP_DESTROY_TIS = 0x914, + MLX5_CMD_OP_QUERY_TIS = 0x915, + MLX5_CMD_OP_CREATE_SQ = 0x904, + MLX5_CMD_OP_MODIFY_SQ = 0x905, + MLX5_CMD_OP_DESTROY_SQ = 0x906, + MLX5_CMD_OP_QUERY_SQ = 0x907, + MLX5_CMD_OP_CREATE_RQ = 0x908, + MLX5_CMD_OP_MODIFY_RQ = 0x909, + MLX5_CMD_OP_DESTROY_RQ = 0x90a, + MLX5_CMD_OP_QUERY_RQ = 0x90b, + MLX5_CMD_OP_CREATE_RMP = 0x90c, + MLX5_CMD_OP_MODIFY_RMP = 0x90d, + MLX5_CMD_OP_DESTROY_RMP = 0x90e, + MLX5_CMD_OP_QUERY_RMP = 0x90f, + MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, + MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, + MLX5_CMD_OP_MAX = 0x911 +}; + +struct mlx5_ifc_cmd_hca_cap_bits { + u8 reserved_0[0x80]; + + u8 log_max_srq_sz[0x8]; + u8 log_max_qp_sz[0x8]; + u8 reserved_1[0xb]; + u8 log_max_qp[0x5]; + + u8 log_max_strq_sz[0x8]; + u8 reserved_2[0x3]; + u8 log_max_srqs[0x5]; + u8 reserved_3[0x10]; + + u8 reserved_4[0x8]; + u8 log_max_cq_sz[0x8]; + u8 reserved_5[0xb]; + u8 log_max_cq[0x5]; + + u8 log_max_eq_sz[0x8]; + u8 reserved_6[0x2]; + u8 log_max_mkey[0x6]; + u8 reserved_7[0xc]; + u8 log_max_eq[0x4]; + + u8 max_indirection[0x8]; + u8 reserved_8[0x1]; + u8 log_max_mrw_sz[0x7]; + u8 reserved_9[0x2]; + u8 log_max_bsf_list_size[0x6]; + u8 reserved_10[0x2]; + u8 log_max_klm_list_size[0x6]; + + u8 reserved_11[0xa]; + u8 log_max_ra_req_dc[0x6]; + u8 reserved_12[0xa]; + u8 log_max_ra_res_dc[0x6]; + + u8 reserved_13[0xa]; + u8 log_max_ra_req_qp[0x6]; + u8 reserved_14[0xa]; + u8 log_max_ra_res_qp[0x6]; + + u8 pad_cap[0x1]; + u8 cc_query_allowed[0x1]; + u8 cc_modify_allowed[0x1]; + u8 reserved_15[0x1d]; + + u8 reserved_16[0x6]; + u8 max_qp_cnt[0xa]; + u8 pkey_table_size[0x10]; + + u8 eswitch_owner[0x1]; + u8 reserved_17[0xa]; + u8 local_ca_ack_delay[0x5]; + u8 reserved_18[0x8]; + u8 num_ports[0x8]; + + u8 reserved_19[0x3]; + u8 log_max_msg[0x5]; + u8 reserved_20[0x18]; + + u8 stat_rate_support[0x10]; + u8 reserved_21[0x10]; + + u8 reserved_22[0x10]; + u8 cmdif_checksum[0x2]; + u8 sigerr_cqe[0x1]; + u8 reserved_23[0x1]; + u8 wq_signature[0x1]; + u8 sctr_data_cqe[0x1]; + u8 reserved_24[0x1]; + u8 sho[0x1]; + u8 tph[0x1]; + u8 rf[0x1]; + u8 dc[0x1]; + u8 reserved_25[0x2]; + u8 roce[0x1]; + u8 atomic[0x1]; + u8 rsz_srq[0x1]; + + u8 cq_oi[0x1]; + u8 cq_resize[0x1]; + u8 cq_moderation[0x1]; + u8 sniffer_rule_flow[0x1]; + u8 sniffer_rule_vport[0x1]; + u8 sniffer_rule_phy[0x1]; + u8 reserved_26[0x1]; + u8 pg[0x1]; + u8 block_lb_mc[0x1]; + 
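The bit-granular layout of struct mlx5_ifc_cmd_hca_cap_bits above is meant to be accessed through the MLX5_SET()/MLX5_GET() helpers added to mlx5/device.h earlier in this diff. A minimal sketch, assuming 'in' is a zeroed mailbox of MLX5_ST_SZ_BYTES(query_hca_cap_in) bytes and 'caps' points at the capability struct returned by the firmware:

#include <linux/mlx5/driver.h>

/* Fill a QUERY_HCA_CAP command with the new accessors. */
static void example_build_query_hca_cap(void *in)
{
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_CUR);
}

/* Read one capability field back out of the returned struct. */
static u8 example_hca_num_ports(void *caps)
{
	return MLX5_GET(cmd_hca_cap, caps, num_ports);
}
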
u8 reserved_27[0x3]; + u8 cd[0x1]; + u8 reserved_28[0x1]; + u8 apm[0x1]; + u8 reserved_29[0x7]; + u8 qkv[0x1]; + u8 pkv[0x1]; + u8 reserved_30[0x4]; + u8 xrc[0x1]; + u8 ud[0x1]; + u8 uc[0x1]; + u8 rc[0x1]; + + u8 reserved_31[0xa]; + u8 uar_sz[0x6]; + u8 reserved_32[0x8]; + u8 log_pg_sz[0x8]; + + u8 bf[0x1]; + u8 reserved_33[0xa]; + u8 log_bf_reg_size[0x5]; + u8 reserved_34[0x10]; + + u8 reserved_35[0x10]; + u8 max_wqe_sz_sq[0x10]; + + u8 reserved_36[0x10]; + u8 max_wqe_sz_rq[0x10]; + + u8 reserved_37[0x10]; + u8 max_wqe_sz_sq_dc[0x10]; + + u8 reserved_38[0x7]; + u8 max_qp_mcg[0x19]; + + u8 reserved_39[0x18]; + u8 log_max_mcg[0x8]; + + u8 reserved_40[0xb]; + u8 log_max_pd[0x5]; + u8 reserved_41[0xb]; + u8 log_max_xrcd[0x5]; + + u8 reserved_42[0x20]; + + u8 reserved_43[0x3]; + u8 log_max_rq[0x5]; + u8 reserved_44[0x3]; + u8 log_max_sq[0x5]; + u8 reserved_45[0x3]; + u8 log_max_tir[0x5]; + u8 reserved_46[0x3]; + u8 log_max_tis[0x5]; + + u8 reserved_47[0x13]; + u8 log_max_rq_per_tir[0x5]; + u8 reserved_48[0x3]; + u8 log_max_tis_per_sq[0x5]; + + u8 reserved_49[0xe0]; + + u8 reserved_50[0x10]; + u8 log_uar_page_sz[0x10]; + + u8 reserved_51[0x100]; + + u8 reserved_52[0x1f]; + u8 cqe_zip[0x1]; + + u8 cqe_zip_timeout[0x10]; + u8 cqe_zip_max_num[0x10]; + + u8 reserved_53[0x220]; +}; + +struct mlx5_ifc_set_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; +}; + +struct mlx5_ifc_query_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 capability_struct[256][0x8]; +}; + +struct mlx5_ifc_set_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +#endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 9709b30e2d69..7c4c0f1f5805 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg { }; struct mlx5_core_qp { + struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); int qpn; - atomic_t refcount; - struct completion free; struct mlx5_rsc_debug *dbg; int pid; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 8981cc882ed2..fa0d74e06428 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -18,6 +18,7 @@ #include <linux/pfn.h> #include <linux/bit_spinlock.h> #include <linux/shrinker.h> +#include <linux/resource.h> struct mempolicy; struct anon_vma; @@ -553,6 +554,25 @@ static inline void __ClearPageBuddy(struct page *page) atomic_set(&page->_mapcount, -1); } +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) + +static inline int PageBalloon(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; +} + +static inline void __SetPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBalloon(page), page); + atomic_set(&page->_mapcount, -1); +} + void put_page(struct page *page); void put_pages_list(struct list_head *pages); @@ -1247,8 +1267,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, 
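The new PageBalloon()/__SetPageBalloon()/__ClearPageBalloon() helpers in mm.h above mark a page as balloon-owned via a special _mapcount value, which is what lets migration drop the MIGRATEPAGE_BALLOON_SUCCESS special case seen earlier in migrate.h. A sketch of how a balloon driver might use them; list handling is elided and the example_* names are hypothetical:

#include <linux/mm.h>

static void example_balloon_insert(struct page *page)
{
	__SetPageBalloon(page);		/* _mapcount := PAGE_BALLOON_MAPCOUNT_VALUE */
	/* ... enqueue the page on the balloon's private list ... */
}

static void example_balloon_remove(struct page *page)
{
	/* ... dequeue the page from the balloon's private list ... */
	__ClearPageBalloon(page);	/* restore the normal -1 _mapcount */
}
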
!vma_growsup(vma->vm_next, addr); } -extern pid_t -vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); +extern struct task_struct *task_of_stack(struct task_struct *task, + struct vm_area_struct *vma, bool in_group); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, @@ -1780,6 +1800,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +static inline int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < RLIM_INFINITY) { + if (((new - start) + (end_data - start_data)) > rlim) + return -ENOSPC; + } + + return 0; +} + extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); @@ -1985,6 +2019,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ +#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index d424b9de3aff..b0692d28f8e6 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -42,7 +42,8 @@ struct mmc_csd { unsigned int read_partial:1, read_misalign:1, write_partial:1, - write_misalign:1; + write_misalign:1, + dsr_imp:1; }; struct mmc_ext_csd { @@ -74,7 +75,7 @@ struct mmc_ext_csd { unsigned int sec_trim_mult; /* Secure trim multiplier */ unsigned int sec_erase_mult; /* Secure erase multiplier */ unsigned int trim_timeout; /* In milliseconds */ - bool enhanced_area_en; /* enable bit */ + bool partition_setting_completed; /* enable bit */ unsigned long long enhanced_area_offset; /* Units: Byte */ unsigned int enhanced_area_size; /* Units: KB */ unsigned int cache_size; /* Units: KB */ @@ -214,11 +215,12 @@ enum mmc_blk_status { }; /* The number of MMC physical partitions. These consist of: - * boot partitions (2), general purpose partitions (4) in MMC v4.4. + * boot partitions (2), general purpose partitions (4) and + * RPMB partition (1) in MMC v4.4. 
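check_data_rlimit(), added to mm.h above, factors out the RLIMIT_DATA check so callers that grow the data segment can share it. A sketch of a brk()-style caller, assuming the usual mm_struct fields and the rlimit() accessor from linux/sched.h:

#include <linux/mm.h>
#include <linux/sched.h>

/* Returns 0 if moving the break to 'newbrk' stays within RLIMIT_DATA,
 * -ENOSPC otherwise. */
static int example_may_expand_brk(struct mm_struct *mm, unsigned long newbrk)
{
	return check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
				 mm->end_data, mm->start_data);
}
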
*/ #define MMC_NUM_BOOT_PARTITION 2 #define MMC_NUM_GP_PARTITION 4 -#define MMC_NUM_PHY_PARTITION 6 +#define MMC_NUM_PHY_PARTITION 7 #define MAX_MMC_PART_NAME_LEN 20 /* diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 29ce014ab421..001366927cf4 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -26,6 +26,8 @@ enum dw_mci_state { STATE_DATA_BUSY, STATE_SENDING_STOP, STATE_DATA_ERROR, + STATE_SENDING_CMD11, + STATE_WAITING_CMD11_DONE, }; enum { @@ -188,7 +190,7 @@ struct dw_mci { /* Workaround flags */ u32 quirks; - struct regulator *vmmc; /* Power regulator */ + bool vqmmc_enabled; unsigned long irq_flags; /* IRQ flags */ int irq; }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7960424d0bc0..df0c15396bbf 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -42,6 +42,7 @@ struct mmc_ios { #define MMC_POWER_OFF 0 #define MMC_POWER_UP 1 #define MMC_POWER_ON 2 +#define MMC_POWER_UNDEFINED 3 unsigned char bus_width; /* data bus width */ @@ -139,6 +140,13 @@ struct mmc_host_ops { int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); void (*hw_reset)(struct mmc_host *host); void (*card_event)(struct mmc_host *host); + + /* + * Optional callback to support controllers with HW issues for multiple + * I/O. Returns the number of supported blocks for the request. + */ + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); }; struct mmc_card; @@ -265,7 +273,6 @@ struct mmc_host { #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ -#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ @@ -365,6 +372,9 @@ struct mmc_host { unsigned int slotno; /* used for sdio acpi binding */ + int dsr_req; /* DSR value is valid */ + u32 dsr; /* optional driver stage (DSR) value */ + unsigned long private[0] ____cacheline_aligned; }; diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 64ec963ed347..1cd00b3a75b9 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -53,6 +53,11 @@ #define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ #define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ +#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64 +#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128 +extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE]; +extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE]; + /* class 3 */ #define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ @@ -281,6 +286,7 @@ struct _mmc_csd { #define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ +#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */ #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ #define EXT_CSD_HPI_MGMT 161 /* R/W */ @@ -349,6 +355,7 @@ struct _mmc_csd { #define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) #define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) +#define EXT_CSD_PART_SETTING_COMPLETED (0x1) #define EXT_CSD_PART_SUPPORT_PART_EN (0x1) #define EXT_CSD_CMD_SET_NORMAL (1<<0) diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 09ebe57d5ce9..dba793e3a331 100644 --- a/include/linux/mmc/sdhci.h +++ 
b/include/linux/mmc/sdhci.h @@ -98,6 +98,8 @@ struct sdhci_host { #define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) /* Controller does not support DDR50 */ #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) +/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ +#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -146,6 +148,7 @@ struct sdhci_host { struct mmc_command *cmd; /* Current command */ struct mmc_data *data; /* Current data request */ unsigned int data_early:1; /* Data finished before cmd */ + unsigned int busy_handle:1; /* Handling the order of Busy-end */ struct sg_mapping_iter sg_miter; /* SG state for PIO */ unsigned int blocks; /* remaining PIO blocks */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index d2433381e828..e56fa24c9322 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host); int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce); + unsigned int debounce, bool *gpio_invert); +int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, + unsigned int idx, bool override_active_level, + unsigned int debounce, bool *gpio_invert); void mmc_gpiod_free_cd(struct mmc_host *host); void mmc_gpiod_request_cd_irq(struct mmc_host *host); diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2f348d02f640..877ef226f90f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -4,10 +4,14 @@ #include <linux/stringify.h> struct page; +struct vm_area_struct; +struct mm_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); +void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason, BUG(); \ } \ } while (0) +#define VM_BUG_ON_VMA(cond, vma) \ + do { \ + if (unlikely(cond)) { \ + dump_vma(vma); \ + BUG(); \ + } \ + } while (0) +#define VM_BUG_ON_MM(cond, mm) \ + do { \ + if (unlikely(cond)) { \ + dump_mm(mm); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) +#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 27288692241e..88787bb4b3b9 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -57,10 +57,13 @@ struct mmu_notifier_ops { * pte. This way the VM will provide proper aging to the * accesses to the page through the secondary MMUs and not * only to the ones through the Linux pte. + * Start-end is necessary in case the secondary MMU is mapping the page + * at a smaller granularity than the primary MMU. 
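With clear_flush_young() now taking a [start, end) range, a secondary-MMU driver can age every mapping it holds inside the range even when it maps at a finer granularity than the primary MMU. A hedged sketch of such a callback; example_test_and_clear_young() is a hypothetical stand-in for the driver's accessed-bit bookkeeping:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static bool example_test_and_clear_young(struct mmu_notifier *mn,
					 unsigned long addr)
{
	/* Stand-in for driver-specific accessed-bit handling. */
	return false;
}

static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long addr;
	int young = 0;

	/* Age every secondary mapping that falls inside [start, end). */
	for (addr = start; addr < end; addr += PAGE_SIZE)
		young |= example_test_and_clear_young(mn, addr);

	return young;
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.clear_flush_young = example_clear_flush_young,
};
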
*/ int (*clear_flush_young)(struct mmu_notifier *mn, struct mm_struct *mm, - unsigned long address); + unsigned long start, + unsigned long end); /* * test_young is called to check the young/accessed bitflag in @@ -175,7 +178,8 @@ extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, - unsigned long address); + unsigned long start, + unsigned long end); extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, @@ -194,10 +198,11 @@ static inline void mmu_notifier_release(struct mm_struct *mm) } static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, - unsigned long address) + unsigned long start, + unsigned long end) { if (mm_has_notifiers(mm)) - return __mmu_notifier_clear_flush_young(mm, address); + return __mmu_notifier_clear_flush_young(mm, start, end); return 0; } @@ -255,7 +260,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) unsigned long ___address = __address; \ __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ - ___address); \ + ___address, \ + ___address + \ + PAGE_SIZE); \ __young; \ }) @@ -266,7 +273,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) unsigned long ___address = __address; \ __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ - ___address); \ + ___address, \ + ___address + \ + PMD_SIZE); \ __young; \ }) @@ -301,7 +310,8 @@ static inline void mmu_notifier_release(struct mm_struct *mm) } static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, - unsigned long address) + unsigned long start, + unsigned long end) { return 0; } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 318df7051850..48bf12ef6620 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -521,13 +521,13 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -typedef enum { +enum zone_flags { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ ZONE_CONGESTED, /* zone has many dirty pages backed by * a congested BDI */ - ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found + ZONE_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. 
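Because the zone flags become a plain enum here (and the zone_set_flag()/zone_is_reclaim_*() wrappers are removed just below), callers are expected to use the generic bit operations on zone->flags directly. A short sketch of the resulting pattern:

#include <linux/bitops.h>
#include <linux/mmzone.h>

static bool example_zone_is_dirty(struct zone *zone)
{
	return test_bit(ZONE_DIRTY, &zone->flags);
}

static void example_mark_zone_congested(struct zone *zone)
{
	set_bit(ZONE_CONGESTED, &zone->flags);
}
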
*/ @@ -535,52 +535,7 @@ typedef enum { * many pages under writeback */ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ -} zone_flags_t; - -static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) -{ - set_bit(flag, &zone->flags); -} - -static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) -{ - return test_and_set_bit(flag, &zone->flags); -} - -static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) -{ - clear_bit(flag, &zone->flags); -} - -static inline int zone_is_reclaim_congested(const struct zone *zone) -{ - return test_bit(ZONE_CONGESTED, &zone->flags); -} - -static inline int zone_is_reclaim_dirty(const struct zone *zone) -{ - return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); -} - -static inline int zone_is_reclaim_writeback(const struct zone *zone) -{ - return test_bit(ZONE_WRITEBACK, &zone->flags); -} - -static inline int zone_is_reclaim_locked(const struct zone *zone) -{ - return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); -} - -static inline int zone_is_fair_depleted(const struct zone *zone) -{ - return test_bit(ZONE_FAIR_DEPLETED, &zone->flags); -} - -static inline int zone_is_oom_locked(const struct zone *zone) -{ - return test_bit(ZONE_OOM_LOCKED, &zone->flags); -} +}; static inline unsigned long zone_end_pfn(const struct zone *zone) { diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 494f99e852da..b43f4752304e 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -42,7 +42,7 @@ struct kernel_param; * NOARG - the parameter allows for no argument (foo instead of foo=1) */ enum { - KERNEL_PARAM_FL_NOARG = (1 << 0) + KERNEL_PARAM_OPS_FL_NOARG = (1 << 0) }; struct kernel_param_ops { @@ -56,11 +56,21 @@ struct kernel_param_ops { void (*free)(void *arg); }; +/* + * Flags available for kernel_param + * + * UNSAFE - the parameter is dangerous and setting it will taint the kernel + */ +enum { + KERNEL_PARAM_FL_UNSAFE = (1 << 0) +}; + struct kernel_param { const char *name; const struct kernel_param_ops *ops; u16 perm; - s16 level; + s8 level; + u8 flags; union { void *arg; const struct kparam_string *str; @@ -113,6 +123,12 @@ struct kparam_array module_param_named(name, name, type, perm) /** + * module_param_unsafe - same as module_param but taints kernel + */ +#define module_param_unsafe(name, type, perm) \ + module_param_named_unsafe(name, name, type, perm) + +/** * module_param_named - typesafe helper for a renamed module/cmdline parameter * @name: a valid C identifier which is the parameter name. * @value: the actual lvalue to alter. @@ -129,6 +145,14 @@ struct kparam_array __MODULE_PARM_TYPE(name, #type) /** + * module_param_named_unsafe - same as module_param_named but taints kernel + */ +#define module_param_named_unsafe(name, value, type, perm) \ + param_check_##type(name, &(value)); \ + module_param_cb_unsafe(name, ¶m_ops_##type, &value, perm); \ + __MODULE_PARM_TYPE(name, #type) + +/** * module_param_cb - general callback for a module/cmdline parameter * @name: a valid C identifier which is the parameter name. * @ops: the set & get operations for this parameter. @@ -137,7 +161,11 @@ struct kparam_array * The ops can have NULL set or get functions. 
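The new module_param_unsafe()/module_param_named_unsafe() variants above behave like their plain counterparts but carry KERNEL_PARAM_FL_UNSAFE, so setting the parameter taints the kernel. A minimal sketch of declaring such a parameter; the name and description are invented for illustration:

#include <linux/moduleparam.h>

static bool example_bypass_checks;
module_param_unsafe(example_bypass_checks, bool, 0644);
MODULE_PARM_DESC(example_bypass_checks,
		 "Skip sanity checks; setting this taints the kernel");
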
*/ #define module_param_cb(name, ops, arg, perm) \ - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0) + +#define module_param_cb_unsafe(name, ops, arg, perm) \ + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \ + KERNEL_PARAM_FL_UNSAFE) /** * <level>_param_cb - general callback for a module/cmdline parameter @@ -149,7 +177,7 @@ struct kparam_array * The ops can have NULL set or get functions. */ #define __level_param_cb(name, ops, arg, perm, level) \ - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level) + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0) #define core_param_cb(name, ops, arg, perm) \ __level_param_cb(name, ops, arg, perm, 1) @@ -184,22 +212,22 @@ struct kparam_array /* This is the fundamental function for registering boot/module parameters. */ -#define __module_param_call(prefix, name, ops, arg, perm, level) \ +#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ /* Default value instead of permissions? */ \ static const char __param_str_##name[] = prefix #name; \ static struct kernel_param __moduleparam_const __param_##name \ __used \ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \ - level, { arg } } + level, flags, { arg } } /* Obsolete - use module_param_cb() */ #define module_param_call(name, set, get, arg, perm) \ static struct kernel_param_ops __param_ops_##name = \ - { 0, (void *)set, (void *)get }; \ + { .flags = 0, (void *)set, (void *)get }; \ __module_param_call(MODULE_PARAM_PREFIX, \ name, &__param_ops_##name, arg, \ - (perm) + sizeof(__check_old_set_param(set))*0, -1) + (perm) + sizeof(__check_old_set_param(set))*0, -1, 0) /* We don't get oldget: it's often a new-style param_get_uint, etc. */ static inline int @@ -279,7 +307,7 @@ static inline void __kernel_param_unlock(void) */ #define core_param(name, var, type, perm) \ param_check_##type(name, &(var)); \ - __module_param_call("", name, ¶m_ops_##type, &var, perm, -1) + __module_param_call("", name, ¶m_ops_##type, &var, perm, -1, 0) #endif /* !MODULE */ /** @@ -297,7 +325,7 @@ static inline void __kernel_param_unlock(void) = { len, string }; \ __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_ops_string, \ - .str = &__param_string_##name, perm, -1); \ + .str = &__param_string_##name, perm, -1, 0);\ __MODULE_PARM_TYPE(name, "string") /** @@ -444,7 +472,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_array_ops, \ .arr = &__param_arr_##name, \ - perm, -1); \ + perm, -1, 0); \ __MODULE_PARM_TYPE(name, "array of " #type) extern struct kernel_param_ops param_array_ops; diff --git a/include/linux/msi.h b/include/linux/msi.h index 8103f32f6d87..44f4746d033b 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -29,7 +29,6 @@ struct msi_desc { __u8 multi_cap : 3; /* log2 num of messages supported */ __u8 maskbit : 1; /* mask-pending bit supported ? 
*/ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ - __u8 pos; /* Location of the msi capability */ __u16 entry_nr; /* specific enabled entry */ unsigned default_irq; /* default pre-assigned irq */ } msi_attrib; @@ -47,8 +46,6 @@ struct msi_desc { /* Last set MSI message */ struct msi_msg msg; - - struct kobject kobj; }; /* @@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); void arch_restore_msi_irqs(struct pci_dev *dev); void default_teardown_msi_irqs(struct pci_dev *dev); @@ -77,8 +73,6 @@ struct msi_chip { int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, struct msi_desc *desc); void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); - int (*check_device)(struct msi_chip *chip, struct pci_dev *dev, - int nvec, int type); }; #endif /* LINUX_MSI_H */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 8d5535c58cc2..cc31498fc526 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -52,7 +52,7 @@ struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) struct task_struct *owner; #endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER @@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock) /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/mutex-design.txt. + * Also see Documentation/locking/mutex-design.txt. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 38377392d082..838407aea705 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -30,6 +30,7 @@ #include <linux/bug.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <linux/prefetch.h> #include <asm/cache.h> #include <asm/byteorder.h> @@ -543,7 +544,7 @@ struct netdev_queue { * read mostly part */ struct net_device *dev; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; struct Qdisc *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; @@ -1206,6 +1207,7 @@ enum netdev_priv_flags { IFF_SUPP_NOFCS = 1<<19, IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, + IFF_XMIT_DST_RELEASE_PERM = 1<<22, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1230,6 +1232,7 @@ enum netdev_priv_flags { #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN +#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM /** * struct net_device - The DEVICE structure. 
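Since netdev_queue::qdisc is now __rcu-annotated (see the netdevice.h hunk above), lockless readers must go through rcu_dereference() inside an RCU read-side critical section. A one-line sketch of a compliant reader:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Caller must hold rcu_read_lock(). */
static struct Qdisc *example_txq_qdisc(struct netdev_queue *txq)
{
	return rcu_dereference(txq->qdisc);
}
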
@@ -1416,6 +1419,8 @@ enum netdev_priv_flags { * @gso_max_size: Maximum size of generic segmentation offload * @gso_max_segs: Maximum number of segments that can be passed to the * NIC for GSO + * @gso_min_segs: Minimum number of segments that can be passed to the + * NIC for GSO * * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device @@ -1666,7 +1671,7 @@ struct net_device { unsigned int gso_max_size; #define GSO_MAX_SEGS 65535 u16 gso_max_segs; - + u16 gso_min_segs; #ifdef CONFIG_DCB const struct dcbnl_rtnl_ops *dcbnl_ops; #endif @@ -1747,6 +1752,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, return &dev->_tx[index]; } +static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, + const struct sk_buff *skb) +{ + return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +} + static inline void netdev_for_each_tx_queue(struct net_device *dev, void (*f)(struct net_device *, struct netdev_queue *, @@ -1781,24 +1792,13 @@ void dev_net_set(struct net_device *dev, struct net *net) #endif } -static inline bool netdev_uses_dsa_tags(struct net_device *dev) +static inline bool netdev_uses_dsa(struct net_device *dev) { -#ifdef CONFIG_NET_DSA_TAG_DSA - if (dev->dsa_ptr != NULL) - return dsa_uses_dsa_tags(dev->dsa_ptr); -#endif - - return 0; -} - -static inline bool netdev_uses_trailer_tags(struct net_device *dev) -{ -#ifdef CONFIG_NET_DSA_TAG_TRAILER +#if IS_ENABLED(CONFIG_NET_DSA) if (dev->dsa_ptr != NULL) - return dsa_uses_trailer_tags(dev->dsa_ptr); + return dsa_uses_tagged_protocol(dev->dsa_ptr); #endif - - return 0; + return false; } /** @@ -1879,11 +1879,20 @@ struct napi_gro_cb { /* jiffies when first packet was created/queued */ unsigned long age; - /* Used in ipv6_gro_receive() */ + /* Used in ipv6_gro_receive() and foo-over-udp */ u16 proto; /* Used in udp_gro_receive */ - u16 udp_mark; + u8 udp_mark:1; + + /* GRO checksum is valid */ + u8 csum_valid:1; + + /* Number of checksums via CHECKSUM_UNNECESSARY */ + u8 csum_cnt:3; + + /* Used in foo-over-udp, set in udp[46]_gro_receive */ + u8 is_ipv6:1; /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -1910,7 +1919,6 @@ struct packet_type { struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); - int (*gso_send_check)(struct sk_buff *skb); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); @@ -1924,6 +1932,7 @@ struct packet_offload { struct udp_offload { __be16 port; + u8 ipproto; struct offload_callbacks callbacks; }; @@ -1982,6 +1991,7 @@ struct pcpu_sw_netstats { #define NETDEV_CHANGEUPPER 0x0015 #define NETDEV_RESEND_IGMP 0x0016 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ +#define NETDEV_CHANGEINFODATA 0x0018 int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2074,8 +2084,8 @@ void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); -struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, - unsigned short mask); +struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct 
net_device *__dev_get_by_name(struct net *net, const char *name); @@ -2153,11 +2163,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb) static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (NAPI_GRO_CB(skb)->csum_valid) NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(start, len, 0)); } +/* GRO checksum functions. These are logical equivalents of the normal + * checksum functions (in skbuff.h) except that they operate on the GRO + * offsets and fields in sk_buff. + */ + +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb); + +static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + return (skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + (!zero_okay || check)); +} + +static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, + __wsum psum) +{ + if (NAPI_GRO_CB(skb)->csum_valid && + !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) + return 0; + + NAPI_GRO_CB(skb)->csum = psum; + + return __skb_gro_checksum_complete(skb); +} + +static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->csum_cnt > 0) { + /* Consume a checksum from CHECKSUM_UNNECESSARY */ + NAPI_GRO_CB(skb)->csum_cnt--; + } else { + /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we + * verified a new top level checksum or an encapsulated one + * during GRO. This saves work if we fallback to normal path. + */ + __skb_incr_checksum_unnecessary(skb); + } +} + +#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ + compute_pseudo) \ +({ \ + __sum16 __ret = 0; \ + if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ + __ret = __skb_gro_checksum_validate_complete(skb, \ + compute_pseudo(skb, proto)); \ + if (__ret) \ + __skb_mark_checksum_bad(skb); \ + else \ + skb_gro_incr_csum_unnecessary(skb); \ + __ret; \ +}) + +#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) + +#define skb_gro_checksum_validate_zero_check(skb, proto, check, \ + compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) + +#define skb_gro_checksum_simple_validate(skb) \ + __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) + +static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid); +} + +static inline void __skb_gro_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + NAPI_GRO_CB(skb)->csum = ~pseudo; + NAPI_GRO_CB(skb)->csum_valid = 1; +} + +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_gro_checksum_convert_check(skb)) \ + __skb_gro_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -2261,12 +2357,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); void __netif_schedule(struct Qdisc *q); - -static inline void netif_schedule_queue(struct netdev_queue *txq) -{ - if (!(txq->state & QUEUE_STATE_ANY_XOFF)) - __netif_schedule(txq->qdisc); -} +void netif_schedule_queue(struct netdev_queue *txq); static 
inline void netif_tx_schedule_all(struct net_device *dev) { @@ -2302,11 +2393,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev) } } -static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) -{ - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) - __netif_schedule(dev_queue->qdisc); -} +void netif_tx_wake_queue(struct netdev_queue *dev_queue); /** * netif_wake_queue - restart transmit @@ -2394,6 +2481,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; } +/** + * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their ndo_start_xmit(), + * to give appropriate hint to the cpu. + */ +static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.num_queued); +#endif +} + +/** + * netdev_txq_bql_complete_prefetchw - prefetch bql data for write + * @dev_queue: pointer to transmit queue + * + * BQL enabled drivers might use this helper in their TX completion path, + * to give appropriate hint to the cpu. + */ +static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) +{ +#ifdef CONFIG_BQL + prefetchw(&dev_queue->dql.limit); +#endif +} + static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, unsigned int bytes) { @@ -2578,19 +2693,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev, return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -/** - * netif_wake_subqueue - allow sending packets on subqueue - * @dev: network device - * @queue_index: sub queue index - * - * Resume individual transmit queue of a device with multiple transmit queues. 
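The GRO checksum helpers added in the netdevice.h hunk above let a gro_receive callback validate a packet checksum while consuming CHECKSUM_UNNECESSARY counts. A hedged sketch of how a protocol offload might call the simple variant; the function name is invented and protocols needing a pseudo-header would use the validate/zero-check forms instead:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
                                            struct sk_buff *skb)
{
        /* Non-zero return means the checksum could not be validated. */
        if (skb_gro_checksum_simple_validate(skb))
                goto flush;

        /* ... protocol-specific aggregation would continue here ... */
        return NULL;

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}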
- */ -static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) - __netif_schedule(txq->qdisc); -} +void netif_wake_subqueue(struct net_device *dev, u16 queue_index); #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -2754,8 +2857,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); @@ -3176,7 +3280,7 @@ static inline int __dev_uc_sync(struct net_device *dev, } /** - * __dev_uc_unsync - Remove synchonized addresses from device + * __dev_uc_unsync - Remove synchronized addresses from device * @dev: device to sync * @unsync: function to call if address should be removed * @@ -3220,7 +3324,7 @@ static inline int __dev_mc_sync(struct net_device *dev, } /** - * __dev_mc_unsync - Remove synchonized addresses from device + * __dev_mc_unsync - Remove synchronized addresses from device * @dev: device to sync * @unsync: function to call if address should be removed * @@ -3357,6 +3461,27 @@ int __init dev_proc_init(void); #define dev_proc_init() 0 #endif +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + skb->xmit_more = more ? 1 : 0; + return ops->ndo_start_xmit(skb, dev); +} + +static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int rc; + + rc = __netdev_start_xmit(ops, skb, dev, more); + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + + return rc; +} + int netdev_class_create_file_ns(struct class_attribute *class_attr, const void *ns); void netdev_class_remove_file_ns(struct class_attribute *class_attr, @@ -3494,6 +3619,12 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } +/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ +static inline void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); +} + extern struct pernet_operations __net_initdata loopback_net_ops; /* Logging, debugging and troubleshooting/diagnostic helpers. 
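Taken together, the BQL prefetch hints, netdev_tx_sent_queue() and the xmit_more flag set by __netdev_start_xmit() above allow a driver to batch its doorbell writes. A sketch of an ndo_start_xmit() using them; the mydrv names and mydrv_kick_hw() are invented, and descriptor setup is elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void mydrv_kick_hw(struct net_device *dev)
{
        /* write the TX doorbell register of the (hypothetical) NIC */
}

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

        /* warm the dql cacheline that netdev_tx_sent_queue() will write */
        netdev_txq_bql_enqueue_prefetchw(txq);

        /* ... map the skb and post a TX descriptor here ... */

        netdev_tx_sent_queue(txq, skb->len);

        /* defer the doorbell while the stack says more frames follow */
        if (!skb->xmit_more || netif_xmit_stopped(txq))
                mydrv_kick_hw(dev);

        return NETDEV_TX_OK;
}

The xmit_more test mirrors the usual batching idiom: the hardware is only notified for the last frame of a burst, or when the queue has been stopped.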
*/ @@ -3523,22 +3654,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev) } __printf(3, 4) -int netdev_printk(const char *level, const struct net_device *dev, - const char *format, ...); +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); __printf(2, 3) -int netdev_emerg(const struct net_device *dev, const char *format, ...); +void netdev_emerg(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_alert(const struct net_device *dev, const char *format, ...); +void netdev_alert(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_crit(const struct net_device *dev, const char *format, ...); +void netdev_crit(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_err(const struct net_device *dev, const char *format, ...); +void netdev_err(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_warn(const struct net_device *dev, const char *format, ...); +void netdev_warn(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_notice(const struct net_device *dev, const char *format, ...); +void netdev_notice(const struct net_device *dev, const char *format, ...); __printf(2, 3) -int netdev_info(const struct net_device *dev, const char *format, ...); +void netdev_info(const struct net_device *dev, const char *format, ...); #define MODULE_ALIAS_NETDEV(device) \ MODULE_ALIAS("netdev-" device) @@ -3556,7 +3687,6 @@ do { \ ({ \ if (0) \ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ - 0; \ }) #endif diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 2077489f9887..2517ece98820 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -9,6 +9,7 @@ #include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> +#include <linux/static_key.h> #include <uapi/linux/netfilter.h> #ifdef CONFIG_NETFILTER static inline int NF_DROP_GETERR(int verdict) @@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -#if defined(CONFIG_JUMP_LABEL) -#include <linux/static_key.h> +#ifdef HAVE_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) { if (__builtin_constant_p(pf) && diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 96afc29184be..f1606fa6132d 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -57,6 +57,8 @@ enum ip_set_extension { IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), IPSET_EXT_BIT_COMMENT = 2, IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT), + IPSET_EXT_BIT_SKBINFO = 3, + IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO), /* Mark set with an extension which needs to call destroy */ IPSET_EXT_BIT_DESTROY = 7, IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY), @@ -65,12 +67,14 @@ enum ip_set_extension { #define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) #define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) #define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT) +#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO) #define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD) /* Extension id, in size order */ enum ip_set_ext_id { IPSET_EXT_ID_COUNTER = 0, IPSET_EXT_ID_TIMEOUT, + IPSET_EXT_ID_SKBINFO, 
IPSET_EXT_ID_COMMENT, IPSET_EXT_ID_MAX, }; @@ -92,6 +96,10 @@ struct ip_set_ext { u64 packets; u64 bytes; u32 timeout; + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; char *comment; }; @@ -104,6 +112,13 @@ struct ip_set_comment { char *str; }; +struct ip_set_skbinfo { + u32 skbmark; + u32 skbmarkmask; + u32 skbprio; + u16 skbqueue; +}; + struct ip_set; #define ext_timeout(e, s) \ @@ -112,7 +127,8 @@ struct ip_set; (struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) #define ext_comment(e, s) \ (struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) - +#define ext_skbinfo(e, s) \ +(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]) typedef int (*ipset_adtfn)(struct ip_set *set, void *value, const struct ip_set_ext *ext, @@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) cadt_flags |= IPSET_FLAG_WITH_COUNTERS; if (SET_WITH_COMMENT(set)) cadt_flags |= IPSET_FLAG_WITH_COMMENT; + if (SET_WITH_SKBINFO(set)) + cadt_flags |= IPSET_FLAG_WITH_SKBINFO; if (SET_WITH_FORCEADD(set)) cadt_flags |= IPSET_FLAG_WITH_FORCEADD; @@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter, } } +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbmark = skbinfo->skbmark; + mext->skbmarkmask = skbinfo->skbmarkmask; + mext->skbprio = skbinfo->skbprio; + mext->skbqueue = skbinfo->skbqueue; +} +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask))) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); + +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + skbinfo->skbmark = ext->skbmark; + skbinfo->skbmarkmask = ext->skbmarkmask; + skbinfo->skbprio = ext->skbprio; + skbinfo->skbqueue = ext->skbqueue; +} + static inline bool ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) { @@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, if (SET_WITH_COMMENT(set) && ip_set_put_comment(skb, ext_comment(e, set))) return -EMSGSIZE; + if (SET_WITH_SKBINFO(set) && + ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) + return -EMSGSIZE; return 0; } diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h index 68c2aea897f5..fe2622a00151 100644 --- a/include/linux/netfilter/ipset/ip_set_list.h +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -6,5 +6,6 @@ #define IP_SET_LIST_DEFAULT_SIZE 8 #define IP_SET_LIST_MIN_SIZE 4 +#define IP_SET_LIST_MAX_SIZE 65536 #endif /* __IP_SET_LIST_H */ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 8ab1c278b66d..c755e4971fa3 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -15,7 +15,7 @@ enum nf_br_hook_priorities { NF_BR_PRI_LAST = INT_MAX, }; -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) #define BRNF_PKT_TYPE 0x01 #define BRNF_BRIDGED_DNAT 0x02 @@ -24,16 +24,6 @@ enum nf_br_hook_priorities { #define BRNF_8021Q 0x10 
#define BRNF_PPPoE 0x20 -/* Only used in br_forward.c */ -int nf_bridge_copy_header(struct sk_buff *skb); -static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) -{ - if (skb->nf_bridge && - skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) - return nf_bridge_copy_header(skb); - return 0; -} - static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) { switch (skb->protocol) { @@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) } } +static inline void nf_bridge_update_protocol(struct sk_buff *skb) +{ + if (skb->nf_bridge->mask & BRNF_8021Q) + skb->protocol = htons(ETH_P_8021Q); + else if (skb->nf_bridge->mask & BRNF_PPPoE) + skb->protocol = htons(ETH_P_PPP_SES); +} + +/* Fill in the header for fragmented IP packets handled by + * the IPv4 connection tracking code. + * + * Only used in br_forward.c + */ +static inline int nf_bridge_copy_header(struct sk_buff *skb) +{ + int err; + unsigned int header_size; + + nf_bridge_update_protocol(skb); + header_size = ETH_HLEN + nf_bridge_encap_header_len(skb); + err = skb_cow_head(skb, header_size); + if (err) + return err; + + skb_copy_to_linear_data_offset(skb, -header_size, + skb->nf_bridge->data, header_size); + __skb_push(skb, nf_bridge_encap_header_len(skb)); + return 0; +} + +static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) +{ + if (skb->nf_bridge && + skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)) + return nf_bridge_copy_header(skb); + return 0; +} + static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) { if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index a1e3064a8d99..026b0c042c40 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -110,6 +110,20 @@ enum nfs_opnum4 { OP_DESTROY_CLIENTID = 57, OP_RECLAIM_COMPLETE = 58, + /* nfs42 */ + OP_ALLOCATE = 59, + OP_COPY = 60, + OP_COPY_NOTIFY = 61, + OP_DEALLOCATE = 62, + OP_IO_ADVISE = 63, + OP_LAYOUTERROR = 64, + OP_LAYOUTSTATS = 65, + OP_OFFLOAD_CANCEL = 66, + OP_OFFLOAD_STATUS = 67, + OP_READ_PLUS = 68, + OP_SEEK = 69, + OP_WRITE_SAME = 70, + OP_ILLEGAL = 10044, }; @@ -117,10 +131,10 @@ enum nfs_opnum4 { Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS -#define LAST_NFS4_OP OP_RECLAIM_COMPLETE +#define LAST_NFS4_OP OP_WRITE_SAME #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_RECLAIM_COMPLETE +#define LAST_NFS42_OP OP_WRITE_SAME enum nfsstat4 { NFS4_OK = 0, @@ -235,10 +249,11 @@ enum nfsstat4 { /* nfs42 */ NFS4ERR_PARTNER_NOTSUPP = 10088, NFS4ERR_PARTNER_NO_AUTH = 10089, - NFS4ERR_METADATA_NOTSUPP = 10090, + NFS4ERR_UNION_NOTSUPP = 10090, NFS4ERR_OFFLOAD_DENIED = 10091, NFS4ERR_WRONG_LFS = 10092, NFS4ERR_BADLABEL = 10093, + NFS4ERR_OFFLOAD_NO_REQS = 10094, }; static inline bool seqid_mutating_err(u32 err) @@ -535,4 +550,9 @@ struct nfs4_deviceid { char data[NFS4_DEVICEID4_SIZE]; }; +enum data_content4 { + NFS4_CONTENT_DATA = 0, + NFS4_CONTENT_HOLE = 1, +}; + #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 5180a7ededec..28d649054d5f 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -443,22 +443,6 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) } /* - * linux/fs/nfs/xattr.c - */ -#ifdef CONFIG_NFS_V3_ACL -extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t); -extern ssize_t nfs3_getxattr(struct dentry *, 
const char *, void *, size_t); -extern int nfs3_setxattr(struct dentry *, const char *, - const void *, size_t, int); -extern int nfs3_removexattr (struct dentry *, const char *name); -#else -# define nfs3_listxattr NULL -# define nfs3_getxattr NULL -# define nfs3_setxattr NULL -# define nfs3_removexattr NULL -#endif - -/* * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); @@ -529,17 +513,9 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); -#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); -#else -static inline int -nfs_commit_inode(struct inode *inode, int how) -{ - return 0; -} -#endif static inline int nfs_have_writebacks(struct inode *inode) @@ -557,23 +533,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, struct page *); /* - * linux/fs/nfs3proc.c - */ -#ifdef CONFIG_NFS_V3_ACL -extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type); -extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type); -extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, - struct posix_acl *dfacl); -extern const struct xattr_handler *nfs3_xattr_handlers[]; -#else -static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, - struct posix_acl *dfacl) -{ - return 0; -} -#endif /* CONFIG_NFS_V3_ACL */ - -/* * inline functions */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 0040629894df..6951c7d9097d 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -252,17 +252,6 @@ struct nfs4_layoutget { gfp_t gfp_flags; }; -struct nfs4_getdevicelist_args { - struct nfs4_sequence_args seq_args; - const struct nfs_fh *fh; - u32 layoutclass; -}; - -struct nfs4_getdevicelist_res { - struct nfs4_sequence_res seq_res; - struct pnfs_devicelist *devlist; -}; - struct nfs4_getdeviceinfo_args { struct nfs4_sequence_args seq_args; struct pnfs_device *pdev; @@ -279,6 +268,9 @@ struct nfs4_layoutcommit_args { __u64 lastbytewritten; struct inode *inode; const u32 *bitmask; + size_t layoutupdate_len; + struct page *layoutupdate_page; + struct page **layoutupdate_pages; }; struct nfs4_layoutcommit_res { @@ -1328,6 +1320,7 @@ struct nfs_commit_data { struct pnfs_layout_segment *lseg; struct nfs_client *ds_clp; /* pNFS data server */ int ds_commit_index; + loff_t lwb; const struct rpc_call_ops *mds_ops; const struct nfs_commit_completion_ops *completion_ops; int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); @@ -1346,6 +1339,7 @@ struct nfs_unlinkdata { struct inode *dir; struct rpc_cred *cred; struct nfs_fattr dir_attr; + long timeout; }; struct nfs_renamedata { @@ -1359,6 +1353,7 @@ struct nfs_renamedata { struct dentry *new_dentry; struct nfs_fattr new_fattr; void (*complete)(struct rpc_task *, struct nfs_renamedata *); + long timeout; }; struct nfs_access_entry; diff --git a/include/linux/of.h b/include/linux/of.h index 6c4363b8ddc3..6545e7aec7bb 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -863,4 +863,7 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, } #endif +/* CONFIG_OF_RESOLVE api */ +extern 
int of_resolve_phandles(struct device_node *tree); + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index fb7b7221e063..8cb14eb393d6 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -23,17 +23,6 @@ struct of_pci_range { #define for_each_of_pci_range(parser, range) \ for (; of_pci_range_parser_one(parser, range);) -static inline void of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res) -{ - res->flags = range->flags; - res->start = range->cpu_addr; - res->end = range->cpu_addr + range->size - 1; - res->parent = res->child = res->sibling = NULL; - res->name = np->full_name; -} - /* Translate a DMA address from device space to CPU space */ extern u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr); @@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index); extern const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags); +extern int pci_register_io_range(phys_addr_t addr, resource_size_t size); extern unsigned long pci_address_to_pio(phys_addr_t addr); +extern phys_addr_t pci_pio_to_address(unsigned long pio); extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); @@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index, return NULL; } +static inline phys_addr_t pci_pio_to_address(unsigned long pio) +{ + return 0; +} + static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { @@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, unsigned int *flags); extern int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); #else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ static inline int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r) @@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev, { return NULL; } +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res) +{ + return -ENOSYS; +} #endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index dde3a4a0fa5d..1fd207e7a847 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent, int of_pci_get_devfn(struct device_node *np); int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); +int of_get_pci_domain_nr(struct device_node *node); #else static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { @@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res) { return -EINVAL; } + +static inline int +of_get_pci_domain_nr(struct device_node *node) +{ + return -1; +} +#endif + +#if defined(CONFIG_OF_ADDRESS) +int of_pci_get_host_bridge_resources(struct device_node *dev, + unsigned char busno, unsigned char bus_max, + struct list_head *resources, resource_size_t *io_base); #endif #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) diff --git 
a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 6f06f8bc612c..e5a70132a240 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -306,15 +306,12 @@ extern void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, int frame_count, int sync_mode, int dma_trigger, int src_or_dst_synch); -extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, - u32 color); extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode); extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode); extern void omap_set_dma_src_params(int lch, int src_port, int src_amode, unsigned long src_start, int src_ei, int src_fi); -extern void omap_set_dma_src_index(int lch, int eidx, int fidx); extern void omap_set_dma_src_data_pack(int lch, int enable); extern void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode); @@ -322,7 +319,6 @@ extern void omap_set_dma_src_burst_mode(int lch, extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, unsigned long dest_start, int dst_ei, int dst_fi); -extern void omap_set_dma_dest_index(int lch, int eidx, int fidx); extern void omap_set_dma_dest_data_pack(int lch, int enable); extern void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode); @@ -331,52 +327,19 @@ extern void omap_set_dma_params(int lch, struct omap_dma_channel_params *params); extern void omap_dma_link_lch(int lch_head, int lch_queue); -extern void omap_dma_unlink_lch(int lch_head, int lch_queue); extern int omap_set_dma_callback(int lch, void (*callback)(int lch, u16 ch_status, void *data), void *data); extern dma_addr_t omap_get_dma_src_pos(int lch); extern dma_addr_t omap_get_dma_dst_pos(int lch); -extern void omap_clear_dma(int lch); extern int omap_get_dma_active_status(int lch); extern int omap_dma_running(void); extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams); -extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio, - unsigned char write_prio); -extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype); -extern void omap_set_dma_src_endian_type(int lch, enum end_type etype); -extern int omap_get_dma_index(int lch, int *ei, int *fi); - void omap_dma_global_context_save(void); void omap_dma_global_context_restore(void); -extern void omap_dma_disable_irq(int lch); - -/* Chaining APIs */ -#ifndef CONFIG_ARCH_OMAP1 -extern int omap_request_dma_chain(int dev_id, const char *dev_name, - void (*callback) (int lch, u16 ch_status, - void *data), - int *chain_id, int no_of_chans, - int chain_mode, - struct omap_dma_channel_params params); -extern int omap_free_dma_chain(int chain_id); -extern int omap_dma_chain_a_transfer(int chain_id, int src_start, - int dest_start, int elem_count, - int frame_count, void *callbk_data); -extern int omap_start_dma_chain_transfers(int chain_id); -extern int omap_stop_dma_chain_transfers(int chain_id); -extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi); -extern int omap_get_dma_chain_dst_pos(int chain_id); -extern int omap_get_dma_chain_src_pos(int chain_id); - -extern int omap_modify_dma_chain_params(int chain_id, - struct omap_dma_channel_params params); -extern int omap_dma_chain_status(int chain_id); -#endif - #if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) #include <mach/lcd_dma.h> #else diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 3df8c7db7a4e..7ea069cd3257 100644 --- a/include/linux/pagemap.h +++ 
b/include/linux/pagemap.h @@ -24,8 +24,7 @@ enum mapping_flags { AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ - AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ - AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */ + AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return !!mapping; } -static inline void mapping_set_balloon(struct address_space *mapping) -{ - set_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline void mapping_clear_balloon(struct address_space *mapping) -{ - clear_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline int mapping_balloon(struct address_space *mapping) -{ - return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); -} - static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); @@ -96,7 +80,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) } /* - * The page cache can done in larger chunks than + * The page cache can be done in larger chunks than * one page, because it allows for more efficient * throughput (it can then be mapped into user * space in smaller chunks for same flexibility). @@ -496,12 +480,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, } /* - * This is exported only for wait_on_page_locked/wait_on_page_writeback. - * Never use this directly! + * This is exported only for wait_on_page_locked/wait_on_page_writeback, + * and for filesystems which need to wait on PG_private. */ extern void wait_on_page_bit(struct page *page, int bit_nr); extern int wait_on_page_bit_killable(struct page *page, int bit_nr); +extern int wait_on_page_bit_killable_timeout(struct page *page, + int bit_nr, unsigned long timeout); static inline int wait_on_page_locked_killable(struct page *page) { @@ -510,6 +496,12 @@ static inline int wait_on_page_locked_killable(struct page *page) return 0; } +extern wait_queue_head_t *page_waitqueue(struct page *page); +static inline void wake_up_page(struct page *page, int bit) +{ + __wake_up_bit(page_waitqueue(page), &page->flags, bit); +} + /* * Wait for a page to be unlocked. * diff --git a/include/linux/pci.h b/include/linux/pci.h index 61978a460841..5be8db45e368 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -45,7 +45,7 @@ * In the interest of not exposing interfaces to user-space unnecessarily, * the following kernel-only defines are being added here. 
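wait_on_page_bit_killable_timeout() in the pagemap.h hunk above gives filesystems a bounded, killable wait on a page flag such as PG_private. A small sketch under the assumption that a two-second budget is acceptable; interpreting the nonzero (signal/timeout) return is left to the caller:

#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/jiffies.h>

/* Hypothetical filesystem helper: wait at most ~2s for PG_private to clear. */
static int myfs_wait_for_private(struct page *page)
{
        if (!PagePrivate(page))
                return 0;

        return wait_on_page_bit_killable_timeout(page, PG_private, 2 * HZ);
}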
*/ -#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) @@ -303,6 +303,7 @@ struct pci_dev { D3cold, not set for devices powered on/off by the corresponding bridge */ + unsigned int ignore_hotplug:1; /* Ignore hotplug events */ unsigned int d3_delay; /* D3->D0 transition time in ms */ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ @@ -456,6 +457,9 @@ struct pci_bus { unsigned char primary; /* number of primary bridge */ unsigned char max_bus_speed; /* enum pci_bus_speed */ unsigned char cur_bus_speed; /* enum pci_bus_speed */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC + int domain_nr; +#endif char name[48]; @@ -1021,6 +1025,11 @@ bool pci_dev_run_wake(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); +static inline void pci_ignore_hotplug(struct pci_dev *dev) +{ + dev->ignore_hotplug = 1; +} + static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { @@ -1097,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, resource_size_t), void *alignf_data); + +int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); + static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { struct pci_bus_region region; @@ -1282,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev); */ #ifdef CONFIG_PCI_DOMAINS extern int pci_domains_supported; +int pci_get_new_domain_nr(void); #else enum { pci_domains_supported = 0 }; static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #endif /* CONFIG_PCI_DOMAINS */ +/* + * Generic implementation for PCI domain support. 
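pci_ignore_hotplug() above simply sets the new ignore_hotplug flag so that hotplug events for a device the driver is deliberately powering down are not acted upon. A hedged sketch of a runtime-suspend path using it; the driver name and the exact power sequence are assumptions:

#include <linux/pci.h>

static int mydrv_runtime_suspend(struct pci_dev *pdev)
{
        pci_save_state(pdev);

        /* the slot is about to look unplugged; tell the core to ignore it */
        pci_ignore_hotplug(pdev);

        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}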
If your + * architecture does not need custom management of PCI + * domains then this implementation will be used + */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC +static inline int pci_domain_nr(struct pci_bus *bus) +{ + return bus->domain_nr; +} +void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent); +#else +static inline void pci_bus_assign_domain_nr(struct pci_bus *bus, + struct device *parent) +{ +} +#endif + /* some architectures require additional setup to direct VGA traffic */ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); @@ -1396,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) @@ -1557,16 +1590,11 @@ enum pci_fixup_pass { #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); -struct pci_dev *pci_get_dma_source(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); void pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } -static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev) -{ - return pci_dev_get(dev); -} static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { @@ -1701,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags); #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ -#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) +#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) /* Large Resource Data Type Tag Item Names */ #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ @@ -1828,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); -/** - * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device - * @pdev: the PCI device - * - * if the device is PCIE, return NULL - * if the device isn't connected to a PCIe bridge (that is its parent is a - * legacy PCI bridge and the bridge is directly connected to bus 0), return its - * parent - */ -struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); - +/* helper functions for operation of device flag */ +static inline void pci_set_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; +} +static inline void pci_clear_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; +} +static inline bool pci_is_dev_assigned(struct pci_dev *pdev) +{ + return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; +} #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 5f2e559af6b0..2706ee9a4327 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev, return -ENODEV; } #endif - -void pci_configure_slot(struct pci_dev *dev); #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6ed0bb73a864..0aa51b451597 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2245,6 +2245,8 @@ #define PCI_VENDOR_ID_MORETON 0x15aa #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 
+#define PCI_VENDOR_ID_VMWARE 0x15ad + #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 @@ -2536,6 +2538,7 @@ #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 +#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 @@ -2818,7 +2821,22 @@ #define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ #define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 3dfbf237cd8f..d5c89e0dd0e6 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -13,7 +13,7 @@ * * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less * than an atomic_t - this is because of the way shutdown works, see - * percpu_ref_kill()/PCPU_COUNT_BIAS. + * percpu_ref_kill()/PERCPU_COUNT_BIAS. * * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() @@ -29,7 +29,7 @@ * calls io_destroy() or the process exits. * * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it - * calls percpu_ref_kill(), then hlist_del_rcu() and sychronize_rcu() to remove + * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove * the kioctx from the proccess's list of kioctxs - after that, there can't be * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop * the initial ref with percpu_ref_put(). @@ -49,28 +49,60 @@ #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/rcupdate.h> +#include <linux/gfp.h> struct percpu_ref; typedef void (percpu_ref_func_t)(struct percpu_ref *); +/* flags set in the lower bits of percpu_ref->percpu_count_ptr */ +enum { + __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */ + __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */ + __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD, + + __PERCPU_REF_FLAG_BITS = 2, +}; + +/* @flags for percpu_ref_init() */ +enum { + /* + * Start w/ ref == 1 in atomic mode. Can be switched to percpu + * operation using percpu_ref_switch_to_percpu(). 
If initialized + * with this flag, the ref will stay in atomic mode until + * percpu_ref_switch_to_percpu() is invoked on it. + */ + PERCPU_REF_INIT_ATOMIC = 1 << 0, + + /* + * Start dead w/ ref == 0 in atomic mode. Must be revived with + * percpu_ref_reinit() before used. Implies INIT_ATOMIC. + */ + PERCPU_REF_INIT_DEAD = 1 << 1, +}; + struct percpu_ref { - atomic_t count; + atomic_long_t count; /* * The low bit of the pointer indicates whether the ref is in percpu * mode; if set, then get/put will manipulate the atomic_t. */ - unsigned long pcpu_count_ptr; + unsigned long percpu_count_ptr; percpu_ref_func_t *release; - percpu_ref_func_t *confirm_kill; + percpu_ref_func_t *confirm_switch; + bool force_atomic:1; struct rcu_head rcu; }; int __must_check percpu_ref_init(struct percpu_ref *ref, - percpu_ref_func_t *release); -void percpu_ref_reinit(struct percpu_ref *ref); + percpu_ref_func_t *release, unsigned int flags, + gfp_t gfp); void percpu_ref_exit(struct percpu_ref *ref); +void percpu_ref_switch_to_atomic(struct percpu_ref *ref, + percpu_ref_func_t *confirm_switch); +void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); +void percpu_ref_reinit(struct percpu_ref *ref); /** * percpu_ref_kill - drop the initial ref @@ -87,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) return percpu_ref_kill_and_confirm(ref, NULL); } -#define PCPU_REF_DEAD 1 - /* * Internal helper. Don't use outside percpu-refcount proper. The * function doesn't return the pointer and let the caller test it for NULL * because doing so forces the compiler to generate two conditional - * branches as it can't assume that @ref->pcpu_count is not NULL. + * branches as it can't assume that @ref->percpu_count is not NULL. */ -static inline bool __pcpu_ref_alive(struct percpu_ref *ref, - unsigned __percpu **pcpu_countp) +static inline bool __ref_is_percpu(struct percpu_ref *ref, + unsigned long __percpu **percpu_countp) { - unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); + unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); /* paired with smp_store_release() in percpu_ref_reinit() */ smp_read_barrier_depends(); - if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) return false; - *pcpu_countp = (unsigned __percpu *)pcpu_ptr; + *percpu_countp = (unsigned long __percpu *)percpu_ptr; return true; } @@ -114,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref, * percpu_ref_get - increment a percpu refcount * @ref: percpu_ref to get * - * Analagous to atomic_inc(). - */ + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. + */ static inline void percpu_ref_get(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_inc(*percpu_count); else - atomic_inc(&ref->count); + atomic_long_inc(&ref->count); rcu_read_unlock_sched(); } @@ -137,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref) * Increment a percpu refcount unless its count already reached zero. * Returns %true on success; %false on failure. * - * The caller is responsible for ensuring that @ref stays accessible. + * This function is safe to call as long as @ref is between init and exit. 
*/ static inline bool percpu_ref_tryget(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; - int ret = false; + unsigned long __percpu *percpu_count; + int ret; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) { - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); ret = true; } else { - ret = atomic_inc_not_zero(&ref->count); + ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); @@ -165,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) * Increment a percpu refcount unless it has already been killed. Returns * %true on success; %false on failure. * - * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget - * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be - * used. After the confirm_kill callback is invoked, it's guaranteed that - * no new reference will be given out by percpu_ref_tryget(). + * Completion of percpu_ref_kill() in itself doesn't guarantee that this + * function will fail. For such guarantee, percpu_ref_kill_and_confirm() + * should be used. After the confirm_kill callback is invoked, it's + * guaranteed that no new reference will be given out by + * percpu_ref_tryget_live(). * - * The caller is responsible for ensuring that @ref stays accessible. + * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; int ret = false; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) { - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); ret = true; + } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) { + ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); @@ -195,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) - this_cpu_dec(*pcpu_count); - else if (unlikely(atomic_dec_and_test(&ref->count))) + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_dec(*percpu_count); + else if (unlikely(atomic_long_dec_and_test(&ref->count))) ref->release(ref); rcu_read_unlock_sched(); @@ -215,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref) * @ref: percpu_ref to test * * Returns %true if @ref reached zero. + * + * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_is_zero(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; - if (__pcpu_ref_alive(ref, &pcpu_count)) + if (__ref_is_percpu(ref, &percpu_count)) return false; - return !atomic_read(&ref->count); + return !atomic_long_read(&ref->count); } #endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 6f61b61b7996..a3aa63e47637 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -48,9 +48,9 @@ * intelligent way to determine this would be nice. 
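percpu_ref_init() in the percpu-refcount.h hunk above now takes an explicit flags word and a gfp mask. A minimal sketch of an updated caller; the object and release callback are illustrative:

#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref my_ref;

static void my_ref_release(struct percpu_ref *ref)
{
        /* last reference dropped: tear down the enclosing object here */
}

static int my_obj_init(void)
{
        /* flags == 0: start live, in percpu mode; PERCPU_REF_INIT_ATOMIC
         * would keep it atomic until percpu_ref_switch_to_percpu() is called.
         */
        return percpu_ref_init(&my_ref, my_ref_release, 0, GFP_KERNEL);
}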
*/ #if BITS_PER_LONG > 32 -#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#define PERCPU_DYNAMIC_RESERVE (28 << 10) #else -#define PERCPU_DYNAMIC_RESERVE (12 << 10) +#define PERCPU_DYNAMIC_RESERVE (20 << 10) #endif extern void *pcpu_base_addr; @@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void); #endif extern void __init percpu_init_late(void); +extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); extern void __percpu *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void __percpu *__pdata); extern phys_addr_t per_cpu_ptr_to_phys(void *addr); -#define alloc_percpu(type) \ - (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) +#define alloc_percpu_gfp(type, gfp) \ + (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ + __alignof__(type), gfp) +#define alloc_percpu(type) \ + (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ + __alignof__(type)) #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index d5dd4657c8d6..50e50095c8d1 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -12,6 +12,7 @@ #include <linux/threads.h> #include <linux/percpu.h> #include <linux/types.h> +#include <linux/gfp.h> #ifdef CONFIG_SMP @@ -26,14 +27,14 @@ struct percpu_counter { extern int percpu_counter_batch; -int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, struct lock_class_key *key); -#define percpu_counter_init(fbc, value) \ +#define percpu_counter_init(fbc, value, gfp) \ ({ \ static struct lock_class_key __key; \ \ - __percpu_counter_init(fbc, value, &__key); \ + __percpu_counter_init(fbc, value, gfp, &__key); \ }) void percpu_counter_destroy(struct percpu_counter *fbc); @@ -89,7 +90,8 @@ struct percpu_counter { s64 count; }; -static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) +static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, + gfp_t gfp) { fbc->count = amount; return 0; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 707617a8c0f6..893a0d07986f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -52,6 +52,7 @@ struct perf_guest_info_callbacks { #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> +#include <linux/workqueue.h> #include <asm/local.h> struct perf_callchain_entry { @@ -268,6 +269,7 @@ struct pmu { * enum perf_event_active_state - the states of a event */ enum perf_event_active_state { + PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, @@ -507,6 +509,9 @@ struct perf_event_context { int nr_cgroups; /* cgroup evts */ int nr_branch_stack; /* branch_stack evt */ struct rcu_head rcu_head; + + struct delayed_work orphans_remove; + bool orphans_remove_sched; }; /* @@ -604,6 +609,13 @@ struct perf_sample_data { u64 txn; }; +/* default value for data source */ +#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) + static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { @@ -616,7 +628,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->regs_user.regs = NULL; data->stack_user_size = 0; data->weight = 0; - data->data_src.val = 0; + data->data_src.val = PERF_MEM_NA; data->txn = 0; } 
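The percpu_counter.h hunk above threads a gfp argument through percpu_counter_init() as well, so callers now state their allocation context. A trivial sketch with an invented counter name:

#include <linux/percpu_counter.h>
#include <linux/gfp.h>

static struct percpu_counter pkt_count;

static int pkt_count_init(void)
{
        /* gfp is new; GFP_KERNEL assumes process context */
        return percpu_counter_init(&pkt_count, 0, GFP_KERNEL);
}

static void pkt_count_exit(void)
{
        percpu_counter_destroy(&pkt_count);
}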
diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h deleted file mode 100644 index 4269de99e320..000000000000 --- a/include/linux/phonedev.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __LINUX_PHONEDEV_H -#define __LINUX_PHONEDEV_H - -#include <linux/types.h> - -#ifdef __KERNEL__ - -#include <linux/poll.h> - -struct phone_device { - struct phone_device *next; - const struct file_operations *f_op; - int (*open) (struct phone_device *, struct file *); - int board; /* Device private index */ - int minor; -}; - -extern int phonedev_init(void); -#define PHONE_MAJOR 100 -extern int phone_register_device(struct phone_device *, int unit); -#define PHONE_UNIT_ANY -1 -extern void phone_unregister_device(struct phone_device *); - -#endif -#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index ed39956b5613..d090cfcaa167 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -598,6 +598,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) } /** + * phy_read_mmd_indirect - reads data from the MMD registers + * @phydev: The PHY device bus + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * + * Description: it reads data from the MMD registers (clause 22 to access to + * clause 45) of the specified phy address. + */ +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr); + +/** * phy_read - Convenience function for reading a given PHY register * @phydev: the phy_device struct * @regnum: register number to read @@ -668,6 +681,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } +/** + * phy_write_mmd_indirect - writes data to the MMD registers + * @phydev: The PHY device + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * @data: data to write in the MMD register + * + * Description: Write data from the MMD registers of the specified + * phy address. 
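phy_read_mmd_indirect(), declared just below, performs a clause-45 style MMD read through the clause-22 register set. A hedged usage sketch; the MDIO_MMD_PCS/MDIO_STAT1 pair is only an illustrative choice of device and register:

#include <linux/phy.h>
#include <linux/mdio.h>

/* Hypothetical helper: read STAT1 from the PCS MMD of this PHY. */
static int myphy_read_pcs_stat1(struct phy_device *phydev)
{
        return phy_read_mmd_indirect(phydev, MDIO_STAT1,
                                     MDIO_MMD_PCS, phydev->addr);
}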
+ */ +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr, u32 data); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index ae612acebb53..f2ca1b459377 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -14,34 +14,35 @@ struct device_node; #ifdef CONFIG_FIXED_PHY extern int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status); -extern int fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np); +extern struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np); extern void fixed_phy_del(int phy_addr); +extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) { return -ENODEV; } -static inline int fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np) +static inline struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np) { - return -ENODEV; + return ERR_PTR(-ENODEV); } static inline int fixed_phy_del(int phy_addr) { return -ENODEV; } -#endif /* CONFIG_FIXED_PHY */ - -/* - * This function issued only by fixed_phy-aware drivers, no need - * protect it with #ifdef - */ -extern int fixed_phy_set_link_update(struct phy_device *phydev, +static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, - struct fixed_phy_status *)); + struct fixed_phy_status *)) +{ + return -ENODEV; +} +#endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index a15f10727eb8..d578a60eff23 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -57,7 +57,7 @@ * which are then pulled up with an external resistor. Setting this * config will enable open drain mode, the argument is ignored. * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source - * (open emitter). Setting this config will enable open drain mode, the + * (open emitter). Setting this config will enable open source mode, the * argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index 3097aafbeb24..511bda9ed4bf 100644 --- a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h @@ -39,13 +39,12 @@ struct pinctrl_dev; * name can be used with the generic @pinctrl_ops to retrieve the * actual pins affected. The applicable groups will be returned in * @groups and the number of groups in @num_groups - * @enable: enable a certain muxing function with a certain pin group. The + * @set_mux: enable a certain muxing function with a certain pin group. The * driver does not need to figure out whether enabling this function * conflicts some other use of the pins in that group, such collisions * are handled by the pinmux subsystem. The @func_selector selects a * certain function whereas @group_selector selects a certain set of pins * to be used. 
On simple controllers the latter argument may be ignored - * @disable: disable a certain muxing selector with a certain pin group * @gpio_request_enable: requests and enables GPIO on a certain pin. * Implement this only if you can mux every pin individually as GPIO. The * affected GPIO range is passed along with an offset(pin number) into that @@ -68,8 +67,8 @@ struct pinmux_ops { unsigned selector, const char * const **groups, unsigned * const num_groups); - int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector, - unsigned group_selector); + int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, + unsigned group_selector); int (*gpio_request_enable) (struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset); diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h new file mode 100644 index 000000000000..0a03b0944411 --- /dev/null +++ b/include/linux/platform_data/drv260x-pdata.h @@ -0,0 +1,28 @@ +/* + * Platform data for DRV260X haptics driver family + * + * Author: Dan Murphy <dmurphy@ti.com> + * + * Copyright: (C) 2014 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _LINUX_DRV260X_PDATA_H +#define _LINUX_DRV260X_PDATA_H + +struct drv260x_platform_data { + u32 library_selection; + u32 mode; + u32 vib_rated_voltage; + u32 vib_overdrive_voltage; +}; + +#endif diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h new file mode 100644 index 000000000000..28702c849af1 --- /dev/null +++ b/include/linux/platform_data/gpio-dwapb.h @@ -0,0 +1,32 @@ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef GPIO_DW_APB_H +#define GPIO_DW_APB_H + +struct dwapb_port_property { + struct device_node *node; + const char *name; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + unsigned int irq; + bool irq_shared; +}; + +struct dwapb_platform_data { + struct dwapb_port_property *properties; + unsigned int nports; +}; + +#endif diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h new file mode 100644 index 000000000000..1419133fa69e --- /dev/null +++ b/include/linux/platform_data/isl9305.h @@ -0,0 +1,30 @@ +/* + * isl9305 - Intersil ISL9305 DCDC regulator + * + * Copyright 2014 Linaro Ltd + * + * Author: Mark Brown <broonie@kernel.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
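For the new dwapb GPIO platform data above, a hypothetical board-file sketch; the port name, geometry and IRQ number are invented and would normally come from the real hardware description:

#include <linux/kernel.h>
#include <linux/platform_data/gpio-dwapb.h>

static struct dwapb_port_property example_dwapb_ports[] = {
        {
                .name  = "porta",       /* illustrative */
                .idx   = 0,
                .ngpio = 32,
                .irq   = 17,            /* made-up IRQ number */
        },
};

static struct dwapb_platform_data example_dwapb_pdata = {
        .properties = example_dwapb_ports,
        .nports     = ARRAY_SIZE(example_dwapb_ports),
};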
+ */ + +#ifndef __ISL9305_H +#define __ISL9305_H + +#define ISL9305_DCD1 0 +#define ISL9305_DCD2 1 +#define ISL9305_LDO1 2 +#define ISL9305_LDO2 3 + +#define ISL9305_MAX_REGULATOR ISL9305_LDO2 + +struct regulator_init_data; + +struct isl9305_pdata { + struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR]; +}; + +#endif diff --git a/include/linux/platform_data/samsung-usbphy.h b/include/linux/platform_data/samsung-usbphy.h deleted file mode 100644 index 1bd24cba982b..000000000000 --- a/include/linux/platform_data/samsung-usbphy.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * http://www.samsung.com/ - * Author: Praveen Paneri <p.paneri@samsung.com> - * - * Defines platform data for samsung usb phy driver. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __SAMSUNG_USBPHY_PLATFORM_H -#define __SAMSUNG_USBPHY_PLATFORM_H - -/** - * samsung_usbphy_data - Platform data for USB PHY driver. - * @pmu_isolation: Function to control usb phy isolation in PMU. - */ -struct samsung_usbphy_data { - void (*pmu_isolation)(int on); -}; - -extern void samsung_usbphy_set_pdata(struct samsung_usbphy_data *pd); - -#endif /* __SAMSUNG_USBPHY_PLATFORM_H */ diff --git a/include/linux/platform_data/tegra_emc.h b/include/linux/platform_data/tegra_emc.h deleted file mode 100644 index df67505e98f8..000000000000 --- a/include/linux/platform_data/tegra_emc.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (C) 2011 Google, Inc. - * - * Author: - * Colin Cross <ccross@android.com> - * Olof Johansson <olof@lixom.net> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#ifndef __TEGRA_EMC_H_ -#define __TEGRA_EMC_H_ - -#define TEGRA_EMC_NUM_REGS 46 - -struct tegra_emc_table { - unsigned long rate; - u32 regs[TEGRA_EMC_NUM_REGS]; -}; - -struct tegra_emc_pdata { - int num_tables; - struct tegra_emc_table *tables; -}; - -#endif diff --git a/include/linux/pm.h b/include/linux/pm.h index 72c0fe098a27..383fd68aaee1 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev); */ struct dev_pm_domain { struct dev_pm_ops ops; + void (*detach)(struct device *dev, bool power_off); }; /* @@ -679,12 +680,16 @@ struct dev_pm_domain { extern void device_pm_lock(void); extern void dpm_resume_start(pm_message_t state); extern void dpm_resume_end(pm_message_t state); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_early(pm_message_t state); extern void dpm_resume(pm_message_t state); extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_end(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); extern int dpm_prepare(pm_message_t state); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index ebc4c76ffb73..73e938b7e937 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -35,18 +35,10 @@ struct gpd_dev_ops { int (*stop)(struct device *dev); int (*save_state)(struct device *dev); int (*restore_state)(struct device *dev); - int (*suspend)(struct device *dev); - int (*suspend_late)(struct device *dev); - int (*resume_early)(struct device *dev); - int (*resume)(struct device *dev); - int (*freeze)(struct device *dev); - int (*freeze_late)(struct device *dev); - int (*thaw_early)(struct device *dev); - int (*thaw)(struct device *dev); bool (*active_wakeup)(struct device *dev); }; -struct gpd_cpu_data { +struct gpd_cpuidle_data { unsigned int saved_exit_latency; struct cpuidle_state *idle_state; }; @@ -71,7 +63,6 @@ struct generic_pm_domain { unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ bool suspend_power_off; /* Power status before system suspend */ - bool dev_irq_safe; /* Device callbacks are IRQ-safe */ int (*power_off)(struct generic_pm_domain *domain); s64 power_off_latency_ns; int (*power_on)(struct generic_pm_domain *domain); @@ -80,8 +71,9 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
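To illustrate the reworked generic PM domain interface in this pm_domain.h hunk (the trimmed gpd_dev_ops, the power_on/power_off callbacks kept in struct generic_pm_domain, and the OF provider helper added further down), a hedged provider-side sketch; the domain name and callback bodies are invented:

#include <linux/of.h>
#include <linux/pm_domain.h>

static int example_pd_power_on(struct generic_pm_domain *domain)
{
        /* enable clocks / deassert resets for the domain here */
        return 0;
}

static int example_pd_power_off(struct generic_pm_domain *domain)
{
        /* gate clocks / assert resets here */
        return 0;
}

static struct generic_pm_domain example_pd = {
        .name      = "example-pd",
        .power_on  = example_pd_power_on,
        .power_off = example_pd_power_off,
};

static int example_pd_register(struct device_node *np)
{
        pm_genpd_init(&example_pd, NULL, true /* start powered off */);
        return of_genpd_add_provider_simple(np, &example_pd);
}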
*/ bool max_off_time_changed; bool cached_power_down_ok; - struct device_node *of_node; /* Node in device tree */ - struct gpd_cpu_data *cpu_data; + struct gpd_cpuidle_data *cpuidle_data; + void (*attach_dev)(struct device *dev); + void (*detach_dev)(struct device *dev); }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -108,7 +100,6 @@ struct gpd_timing_data { struct generic_pm_domain_data { struct pm_domain_data base; - struct gpd_dev_ops ops; struct gpd_timing_data td; struct notifier_block nb; struct mutex lock; @@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) return to_gpd_data(dev->power.subsys_data->domain_data); } -extern struct dev_power_governor simple_qos_governor; - extern struct generic_pm_domain *dev_to_genpd(struct device *dev); extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); -extern int __pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev, - struct gpd_timing_data *td); - extern int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, struct gpd_timing_data *td); @@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name, const char *subdomain_name); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td); -extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); extern int pm_genpd_name_attach_cpuidle(const char *name, int state); extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); @@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, extern int pm_genpd_poweron(struct generic_pm_domain *genpd); extern int pm_genpd_name_poweron(const char *domain_name); -extern bool default_stop_ok(struct device *dev); - +extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; #else @@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int __pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} static inline int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, struct gpd_timing_data *td) @@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} -static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) -{ - return -ENOSYS; -} static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) { return -ENOSYS; @@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name) { return -ENOSYS; } -static inline bool default_stop_ok(struct device *dev) -{ - return false; -} #define simple_qos_governor NULL #define pm_domain_always_on_gov NULL #endif @@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, return __pm_genpd_add_device(genpd, dev, NULL); } -static inline int pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev) -{ - return 
__pm_genpd_of_add_device(genpd_node, dev, NULL); -} - static inline int pm_genpd_name_add_device(const char *domain_name, struct device *dev) { return __pm_genpd_name_add_device(domain_name, dev, NULL); } -static inline int pm_genpd_remove_callbacks(struct device *dev) -{ - return __pm_genpd_remove_callbacks(dev, true); -} - #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME -extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); extern void pm_genpd_poweroff_unused(void); #else -static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} static inline void pm_genpd_poweroff_unused(void) {} #endif #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP -extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); +extern void pm_genpd_syscore_poweroff(struct device *dev); +extern void pm_genpd_syscore_poweron(struct device *dev); #else -static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} +static inline void pm_genpd_syscore_poweroff(struct device *dev) {} +static inline void pm_genpd_syscore_poweron(struct device *dev) {} #endif -static inline void pm_genpd_syscore_poweroff(struct device *dev) +/* OF PM domain providers */ +struct of_device_id; + +struct genpd_onecell_data { + struct generic_pm_domain **domains; + unsigned int num_domains; +}; + +typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, + void *data); + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, + void *data); +void of_genpd_del_provider(struct device_node *np); + +struct generic_pm_domain *__of_genpd_xlate_simple( + struct of_phandle_args *genpdspec, + void *data); +struct generic_pm_domain *__of_genpd_xlate_onecell( + struct of_phandle_args *genpdspec, + void *data); + +int genpd_dev_pm_attach(struct device *dev); +#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ +static inline int __of_genpd_add_provider(struct device_node *np, + genpd_xlate_t xlate, void *data) +{ + return 0; +} +static inline void of_genpd_del_provider(struct device_node *np) {} + +#define __of_genpd_xlate_simple NULL +#define __of_genpd_xlate_onecell NULL + +static inline int genpd_dev_pm_attach(struct device *dev) +{ + return -ENODEV; +} +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ + +static inline int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) +{ + return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd); +} +static inline int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) { - pm_genpd_syscore_switch(dev, true); + return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data); } -static inline void pm_genpd_syscore_poweron(struct device *dev) +#ifdef CONFIG_PM +extern int dev_pm_domain_attach(struct device *dev, bool power_on); +extern void dev_pm_domain_detach(struct device *dev, bool power_off); +#else +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) { - pm_genpd_syscore_switch(dev, false); + return -ENODEV; } +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} +#endif #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 9d117f61d976..b97bf2ef996e 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -74,6 +74,8 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p #endif /* CONFIG_PROC_FS */ +struct net; + static inline struct proc_dir_entry *proc_net_mkdir( struct net 
*net, const char *name, struct proc_dir_entry *parent) { diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 26a8a4ed9b07..00e8e8fa7358 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h @@ -12,6 +12,7 @@ #include <linux/percpu_counter.h> #include <linux/spinlock.h> #include <linux/mutex.h> +#include <linux/gfp.h> struct prop_global { /* @@ -40,7 +41,7 @@ struct prop_descriptor { struct mutex mutex; /* serialize the prop_global switch */ }; -int prop_descriptor_init(struct prop_descriptor *pd, int shift); +int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp); void prop_change_shift(struct prop_descriptor *pd, int new_shift); /* @@ -61,7 +62,7 @@ struct prop_local_percpu { raw_spinlock_t lock; /* protect the snapshot state */ }; -int prop_local_init_percpu(struct prop_local_percpu *pl); +int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp); void prop_local_destroy_percpu(struct prop_local_percpu *pl); void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, diff --git a/include/linux/random.h b/include/linux/random.h index 57fbbffd77a0..b05856e16b75 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,7 @@ unsigned int get_random_int(void); unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); u32 prandom_u32(void); -void prandom_bytes(void *buf, int nbytes); +void prandom_bytes(void *buf, size_t nbytes); void prandom_seed(u32 seed); void prandom_reseed_late(void); @@ -35,7 +35,7 @@ struct rnd_state { }; u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d231aa17b1d7..a4a819ffb2d1 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -47,14 +47,12 @@ #include <asm/barrier.h> extern int rcu_expedited; /* for sysctl */ -#ifdef CONFIG_RCU_TORTURE_TEST -extern int rcutorture_runnable; /* for sysctl */ -#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ enum rcutorture_type { RCU_FLAVOR, RCU_BH_FLAVOR, RCU_SCHED_FLAVOR, + RCU_TASKS_FLAVOR, SRCU_FLAVOR, INVALID_RCU_FLAVOR }; @@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); +/** + * call_rcu_tasks() - Queue an RCU for invocation task-based grace period + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_tasks() assumes + * that the read-side critical sections end at a voluntary context + * switch (not a preemption!), entry into idle, or transition to usermode + * execution. As such, there are no read-side primitives analogous to + * rcu_read_lock() and rcu_read_unlock() because this primitive is intended + * to determine that all tasks have passed through a safe state, not so + * much for data-strcuture synchronization. + * + * See the description of call_rcu() for more detailed information on + * memory ordering guarantees. 
+ */ +void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); +void synchronize_rcu_tasks(void); +void rcu_barrier_tasks(void); + #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); @@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -void rcu_sched_qs(int cpu); -void rcu_bh_qs(int cpu); +void rcu_sched_qs(void); +void rcu_bh_qs(void); void rcu_check_callbacks(int cpu, int user); struct notifier_block; void rcu_idle_enter(void); @@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, struct task_struct *next) { } #endif /* CONFIG_RCU_USER_QS */ +#ifdef CONFIG_RCU_NOCB_CPU +void rcu_init_nohz(void); +#else /* #ifdef CONFIG_RCU_NOCB_CPU */ +static inline void rcu_init_nohz(void) +{ +} +#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ + /** * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers * @a: Code that RCU needs to pay attention to. @@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, rcu_irq_exit(); \ } while (0) +/* + * Note a voluntary context switch for RCU-tasks benefit. This is a + * macro rather than an inline function to avoid #include hell. + */ +#ifdef CONFIG_TASKS_RCU +#define TASKS_RCU(x) x +extern struct srcu_struct tasks_rcu_exit_srcu; +#define rcu_note_voluntary_context_switch(t) \ + do { \ + if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ + ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ + } while (0) +#else /* #ifdef CONFIG_TASKS_RCU */ +#define TASKS_RCU(x) do { } while (0) +#define rcu_note_voluntary_context_switch(t) do { } while (0) +#endif /* #else #ifdef CONFIG_TASKS_RCU */ + +/** + * cond_resched_rcu_qs - Report potential quiescent states to RCU + * + * This macro resembles cond_resched(), except that it is defined to + * report potential quiescent states to RCU-tasks even if the cond_resched() + * machinery were to be shut off, as some advocate for PREEMPT kernels. + */ +#define cond_resched_rcu_qs() \ +do { \ + rcu_note_voluntary_context_switch(current); \ + cond_resched(); \ +} while (0) + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) bool __rcu_is_watching(void); #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ @@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { - return 1; + return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ @@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); -/** - * rcu_read_lock_held() - might we be in RCU read-side critical section? - * - * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU - * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, - * this assumes we are in an RCU read-side critical section unless it can - * prove otherwise. This is useful for debug checks in functions that - * require that they be called within an RCU read-side critical section. - * - * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot - * and while lockdep is disabled. 
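A hedged usage sketch for the RCU-tasks primitives declared above, in the style of a caller that must defer freeing a trampoline-like object until every task has passed through a voluntary context switch; the structure and function names are invented:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_trampoline {
        struct rcu_head rcu;
        void *text;
};

static void example_free_trampoline(struct rcu_head *head)
{
        struct example_trampoline *tr =
                container_of(head, struct example_trampoline, rcu);

        kfree(tr);
}

static void example_remove_trampoline(struct example_trampoline *tr)
{
        /* Asynchronous: free after an RCU-tasks grace period elapses. */
        call_rcu_tasks(&tr->rcu, example_free_trampoline);
}

static void example_remove_trampoline_sync(struct example_trampoline *tr)
{
        synchronize_rcu_tasks();        /* blocking variant */
        kfree(tr);
}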
- * - * Note that rcu_read_lock() and the matching rcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * rcu_read_unlock() in process context if the matching rcu_read_lock() - * was invoked from within an irq handler. - * - * Note that rcu_read_lock() is disallowed if the CPU is either idle or - * offline from an RCU perspective, so check for those as well. - */ -static inline int rcu_read_lock_held(void) -{ - if (!debug_lockdep_rcu_enabled()) - return 1; - if (!rcu_is_watching()) - return 0; - if (!rcu_lockdep_current_cpu_online()) - return 0; - return lock_is_held(&rcu_lock_map); -} - -/* - * rcu_read_lock_bh_held() is defined out of line to avoid #include-file - * hell. - */ +int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index d40a6a451330..38cc5b1e252d 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, static inline void rcu_note_context_switch(int cpu) { - rcu_sched_qs(cpu); + rcu_sched_qs(); } /* diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 48bf152761c7..67fc8fcdc4b0 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -38,6 +38,9 @@ extern int reboot_force; extern int register_reboot_notifier(struct notifier_block *); extern int unregister_reboot_notifier(struct notifier_block *); +extern int register_restart_handler(struct notifier_block *); +extern int unregister_restart_handler(struct notifier_block *); +extern void do_kernel_restart(char *cmd); /* * Architecture-specific implementations of sys_reboot commands. diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f8a8733068a7..d347c805f923 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -93,7 +93,12 @@ struct regmap; * OVER_TEMP Regulator over temp. * FORCE_DISABLE Regulator forcibly shut down by software. * VOLTAGE_CHANGE Regulator voltage changed. + * Data passed is old voltage cast to (void *). * DISABLE Regulator was disabled. + * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed. + * Data passed is "struct pre_voltage_change_data" + * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. + * Data passed is old voltage cast to (void *). * * NOTE: These events can be OR'ed together when passed into handler. */ @@ -106,6 +111,21 @@ struct regmap; #define REGULATOR_EVENT_FORCE_DISABLE 0x20 #define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 #define REGULATOR_EVENT_DISABLE 0x80 +#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 +#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 + +/** + * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event + * + * @old_uV: Current voltage before change. + * @min_uV: Min voltage we'll change to. + * @max_uV: Max voltage we'll change to. + */ +struct pre_voltage_change_data { + unsigned long old_uV; + unsigned long min_uV; + unsigned long max_uV; +}; struct regulator; diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h index 0981ce0e72cc..5479394fefce 100644 --- a/include/linux/regulator/da9211.h +++ b/include/linux/regulator/da9211.h @@ -1,5 +1,5 @@ /* - * da9211.h - Regulator device driver for DA9211 + * da9211.h - Regulator device driver for DA9211/DA9213 * Copyright (C) 2014 Dialog Semiconductor Ltd. 
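The register_restart_handler()/do_kernel_restart() pair added to reboot.h above uses ordinary notifier blocks. A sketch of a hypothetical driver-side handler; the priority follows the usual 0 (low) / 128 (default) / 255 (high) convention and the actual reset action is only a comment:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_restart_handler(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        /* 'data' is expected to carry the (possibly NULL) command string
         * given to do_kernel_restart().
         */
        /* kick the board-level reset line here */
        return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
        .notifier_call = example_restart_handler,
        .priority      = 128,
};

static int example_restart_register(void)
{
        return register_restart_handler(&example_restart_nb);
}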
* * This library is free software; you can redistribute it and/or @@ -20,6 +20,11 @@ #define DA9211_MAX_REGULATORS 2 +enum da9211_chip_id { + DA9211, + DA9213, +}; + struct da9211_pdata { /* * Number of buck @@ -27,6 +32,6 @@ struct da9211_pdata { * 2 : 2 phase 2 buck */ int num_buck; - struct regulator_init_data *init_data; + struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; }; #endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4efa1ed8a2b0..fc0ee0ce8325 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -203,6 +203,8 @@ enum regulator_type { * * @name: Identifying name for the regulator. * @supply_name: Identifying the regulator supply + * @of_match: Name used to identify regulator in DT. + * @regulators_node: Name of node containing regulator definitions in DT. * @id: Numerical identifier for the regulator. * @ops: Regulator operations table. * @irq: Interrupt number for the regulator. @@ -240,14 +242,17 @@ enum regulator_type { * @bypass_val_off: Disabling value for control when using regmap set_bypass * * @enable_time: Time taken for initial enable of regulator (in uS). + * @off_on_delay: guard time (in uS), before re-enabling a regulator */ struct regulator_desc { const char *name; const char *supply_name; + const char *of_match; + const char *regulators_node; int id; bool continuous_voltage_range; unsigned n_voltages; - struct regulator_ops *ops; + const struct regulator_ops *ops; int irq; enum regulator_type type; struct module *owner; @@ -278,6 +283,8 @@ struct regulator_desc { unsigned int bypass_val_off; unsigned int enable_time; + + unsigned int off_on_delay; }; /** @@ -350,6 +357,9 @@ struct regulator_dev { struct regulator_enable_gpio *ena_pin; unsigned int ena_gpio_state:1; + + /* time when this regulator was disabled last time */ + unsigned long last_off_jiffy; }; struct regulator_dev * diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h index de9a7fae20be..cedd0febe882 100644 --- a/include/linux/regulator/max1586.h +++ b/include/linux/regulator/max1586.h @@ -40,7 +40,7 @@ */ struct max1586_subdev_data { int id; - char *name; + const char *name; struct regulator_init_data *platform_data; }; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 36826c0166c5..fb298e9d6d3a 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -44,6 +44,7 @@ struct rhashtable; * @head_offset: Offset of rhash_head in struct to be hashed * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding + * @min_shift: Minimum number of shifts while shrinking * @hashfn: Function to hash key * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand @@ -57,6 +58,7 @@ struct rhashtable_params { size_t head_offset; u32 hash_rnd; size_t max_shift; + size_t min_shift; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; bool (*grow_decision)(const struct rhashtable *ht, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be574506e6a9..c0c2bce6b0b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { - VM_BUG_ON(vma->anon_vma != next->anon_vma); + VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); unlink_anon_vmas(next); } diff --git a/include/linux/rtnetlink.h 
b/include/linux/rtnetlink.h index 167bae7bdfa4..6cacbce1a06c 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -47,6 +47,16 @@ static inline int lockdep_rtnl_is_held(void) rcu_dereference_check(p, lockdep_rtnl_is_held()) /** + * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereference + * + * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh() + * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh() + */ +#define rcu_dereference_bh_rtnl(p) \ + rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) + +/** * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing * diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 035d3c57fc8a..8f498cdde280 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/lockdep-design.txt for more details.) + * See Documentation/locking/lockdep-design.txt for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); diff --git a/include/linux/sched.h b/include/linux/sched.h index 18f52624eaa6..5e344bbe63ec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -168,6 +168,7 @@ extern int nr_threads; DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); extern unsigned long nr_running(void); +extern bool single_task_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); @@ -1215,6 +1216,13 @@ struct sched_dl_entity { struct hrtimer dl_timer; }; +union rcu_special { + struct { + bool blocked; + bool need_qs; + } b; + short s; +}; struct rcu_node; enum perf_event_task_context { @@ -1267,12 +1275,18 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; - char rcu_read_unlock_special; + union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TREE_PREEMPT_RCU struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; +#endif /* #ifdef CONFIG_TASKS_RCU */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; @@ -1906,8 +1920,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ -#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ @@ -1939,11 +1951,13 @@ extern void 
thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ static inline gfp_t memalloc_noio_flags(gfp_t flags) { if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~__GFP_IO; + flags &= ~(__GFP_IO | __GFP_FS); return flags; } @@ -1960,17 +1974,31 @@ static inline void memalloc_noio_restore(unsigned int flags) } /* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -static inline bool task_no_new_privs(struct task_struct *p) -{ - return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); -} -static inline void task_set_no_new_privs(struct task_struct *p) -{ - set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); -} +#define TASK_PFA_TEST(name, func) \ + static inline bool task_##func(struct task_struct *p) \ + { return test_bit(PFA_##name, &p->atomic_flags); } +#define TASK_PFA_SET(name, func) \ + static inline void task_set_##func(struct task_struct *p) \ + { set_bit(PFA_##name, &p->atomic_flags); } +#define TASK_PFA_CLEAR(name, func) \ + static inline void task_clear_##func(struct task_struct *p) \ + { clear_bit(PFA_##name, &p->atomic_flags); } + +TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) +TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) + +TASK_PFA_TEST(SPREAD_PAGE, spread_page) +TASK_PFA_SET(SPREAD_PAGE, spread_page) +TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) + +TASK_PFA_TEST(SPREAD_SLAB, spread_slab) +TASK_PFA_SET(SPREAD_SLAB, spread_slab) +TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) /* * task->jobctl flags @@ -2002,29 +2030,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task); extern void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask); -#ifdef CONFIG_PREEMPT_RCU - -#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ -#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ - static inline void rcu_copy_process(struct task_struct *p) { +#ifdef CONFIG_PREEMPT_RCU p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special = 0; -#ifdef CONFIG_TREE_PREEMPT_RCU + p->rcu_read_unlock_special.s = 0; p->rcu_blocked_node = NULL; -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ INIT_LIST_HEAD(&p->rcu_node_entry); +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + p->rcu_tasks_holdout = false; + INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); + p->rcu_tasks_idle_cpu = -1; +#endif /* #ifdef CONFIG_TASKS_RCU */ } -#else - -static inline void rcu_copy_process(struct task_struct *p) -{ -} - -#endif - static inline void tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { @@ -2611,9 +2631,22 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct task_thread_info(p)->task = p; } +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. 
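The TASK_PFA_* macros above generate small test/set/clear helpers for the per-process atomic flags, replacing the removed PF_SPREAD_PAGE/PF_SPREAD_SLAB task flags. A minimal sketch of cpuset-style callers using the generated names:

#include <linux/sched.h>

static void example_enable_spreading(struct task_struct *tsk)
{
        task_set_spread_page(tsk);      /* from TASK_PFA_SET(SPREAD_PAGE, ...) */
        task_set_spread_slab(tsk);
}

static bool example_spreads_page_cache(struct task_struct *tsk)
{
        return task_spread_page(tsk);   /* from TASK_PFA_TEST(SPREAD_PAGE, ...) */
}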
+ */ static inline unsigned long *end_of_stack(struct task_struct *p) { +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else return (unsigned long *)(task_thread_info(p) + 1); +#endif } #endif diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 005bf3e38db5..f0f8bad54be9 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -5,12 +5,4 @@ extern struct screen_info screen_info; -#define ORIG_X (screen_info.orig_x) -#define ORIG_Y (screen_info.orig_y) -#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) -#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) -#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) -#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) -#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) -#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) #endif /* _SCREEN_INFO_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 623f90e5f38d..ba96471c11ba 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1559,7 +1559,7 @@ struct security_operations { int (*file_lock) (struct file *file, unsigned int cmd); int (*file_fcntl) (struct file *file, unsigned int cmd, unsigned long arg); - int (*file_set_fowner) (struct file *file); + void (*file_set_fowner) (struct file *file); int (*file_send_sigiotask) (struct task_struct *tsk, struct fown_struct *fown, int sig); int (*file_receive) (struct file *file); @@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); int security_file_lock(struct file *file, unsigned int cmd); int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); -int security_file_set_fowner(struct file *file); +void security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); @@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry, static inline int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, - const initxattrs initxattrs, + const initxattrs xattrs, void *fs_data) { return 0; @@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd, return 0; } -static inline int security_file_set_fowner(struct file *file) +static inline void security_file_set_fowner(struct file *file) { - return 0; + return; } static inline int security_file_send_sigiotask(struct task_struct *tsk, diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index f93649e22c43..3df10d5f154b 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -84,6 +84,7 @@ struct uart_8250_port { unsigned char mcr_mask; /* mask of user bits */ unsigned char mcr_force; /* mask of forced bits */ unsigned char cur_iotype; /* Running I/O type */ + unsigned int rpm_tx_active; /* * Some bits in registers are cleared on a read, so they must @@ -96,10 +97,13 @@ struct uart_8250_port { unsigned char msr_saved_flags; struct uart_8250_dma *dma; + struct serial_rs485 rs485; /* 8250 specific callbacks */ int (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, int); + int (*rs485_config)(struct uart_8250_port *, + struct serial_rs485 *rs485); }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) @@ -121,6 +125,8 @@ extern void 
serial8250_early_out(struct uart_port *port, int offset, int value); extern int setup_early_serial8250_console(char *cmdline); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); +extern int serial8250_do_startup(struct uart_port *port); +extern void serial8250_do_shutdown(struct uart_port *port); extern void serial8250_do_pm(struct uart_port *port, unsigned int state, unsigned int oldstate); extern int fsl8250_handle_irq(struct uart_port *port); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index cf3a1e789bf5..21c2e05c1bc3 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -112,6 +112,7 @@ struct uart_icount { }; typedef unsigned int __bitwise__ upf_t; +typedef unsigned int __bitwise__ upstat_t; struct uart_port { spinlock_t lock; /* port lock */ @@ -122,6 +123,10 @@ struct uart_port { void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); + int (*startup)(struct uart_port *port); + void (*shutdown)(struct uart_port *port); + void (*throttle)(struct uart_port *port); + void (*unthrottle)(struct uart_port *port); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned int old); @@ -152,6 +157,7 @@ struct uart_port { unsigned long sysrq; /* sysrq timeout */ #endif + /* flags must be updated while holding port mutex */ upf_t flags; #define UPF_FOURPORT ((__force upf_t) (1 << 1)) @@ -187,6 +193,13 @@ struct uart_port { #define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) + /* status must be updated while holding port lock */ + upstat_t status; + +#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0)) +#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1)) + + int hw_stopped; /* sw-assisted CTS flow state */ unsigned int mctrl; /* current modem ctrl settings */ unsigned int timeout; /* character-based timeout */ unsigned int type; /* port type */ @@ -347,11 +360,16 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; - if(tty->stopped || tty->hw_stopped) + if (tty->stopped || port->hw_stopped) return 1; return 0; } +static inline bool uart_cts_enabled(struct uart_port *uport) +{ + return uport->status & UPSTAT_CTS_ENABLE; +} + /* * The following are helper functions for the low level drivers. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index abde271c18ae..3ab0749d6875 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -28,7 +28,6 @@ #include <linux/textsearch.h> #include <net/checksum.h> #include <linux/rcupdate.h> -#include <linux/dmaengine.h> #include <linux/hrtimer.h> #include <linux/dma-mapping.h> #include <linux/netdev_features.h> @@ -47,11 +46,29 @@ * * The hardware you're dealing with doesn't calculate the full checksum * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums - * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will - * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still - * undefined in this case though. It is a bad option, but, unfortunately, - * nowadays most vendors do this. Apparently with the secret goal to sell - * you new devices, when you will add new protocol to your host, f.e. IPv6 8) + * for specific protocols. 
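A sketch of how a UART driver might maintain the new upstat_t status bits and query them with uart_cts_enabled(); it assumes it runs from set_termios with the port lock held (as the struct uart_port comment requires) and that the usual termios CRTSCTS definition is in scope:

#include <linux/serial_core.h>

static void example_update_flow_control(struct uart_port *port,
                                        struct ktermios *termios)
{
        if (termios->c_cflag & CRTSCTS)
                port->status |= UPSTAT_CTS_ENABLE;
        else
                port->status &= ~UPSTAT_CTS_ENABLE;

        if (uart_cts_enabled(port)) {
                /* enable automatic CTS handling in hardware here */
        }
}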
For such packets it will set CHECKSUM_UNNECESSARY + * if their checksums are okay. skb->csum is still undefined in this case + * though. It is a bad option, but, unfortunately, nowadays most vendors do + * this. Apparently with the secret goal to sell you new devices, when you + * will add new protocol to your host, f.e. IPv6 8) + * + * CHECKSUM_UNNECESSARY is applicable to following protocols: + * TCP: IPv6 and IPv4. + * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a + * zero UDP checksum for either IPv4 or IPv6, the networking stack + * may perform further validation in this case. + * GRE: only if the checksum is present in the header. + * SCTP: indicates the CRC in SCTP header has been validated. + * + * skb->csum_level indicates the number of consecutive checksums found in + * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. + * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet + * and a device is able to verify the checksums for UDP (possibly zero), + * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * two. If the device were only able to verify the UDP checksum and not + * GRE, either because it doesn't support GRE checksum of because GRE + * checksum is bad, skb->csum_level would be set to zero (TCP checksum is + * not considered in this case). * * CHECKSUM_COMPLETE: * @@ -112,6 +129,9 @@ #define CHECKSUM_COMPLETE 2 #define CHECKSUM_PARTIAL 3 +/* Maximum value in skb->csum_level */ +#define SKB_MAX_CSUM_LEVEL 3 + #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) #define SKB_WITH_OVERHEAD(X) \ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -135,7 +155,7 @@ struct nf_conntrack { }; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { atomic_t use; unsigned int mask; @@ -318,9 +338,10 @@ struct skb_shared_info { enum { - SKB_FCLONE_UNAVAILABLE, - SKB_FCLONE_ORIG, - SKB_FCLONE_CLONE, + SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ + SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ + SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ + SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -452,6 +473,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices + * @xmit_more: More SKBs are pending for this queue * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -460,8 +482,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS - * @dma_cookie: a cookie to one of several possible DMA operations - * done by skb DMA functions * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark @@ -505,87 +525,99 @@ struct sk_buff { char cb[48] __aligned(8); unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); #ifdef CONFIG_XFRM struct sec_path *sp; #endif +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + struct nf_conntrack *nfct; +#endif +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + struct nf_bridge_info *nf_bridge; +#endif unsigned int len, data_len; __u16 mac_len, hdr_len; - 
union { - __wsum csum; - struct { - __u16 csum_start; - __u16 csum_offset; - }; - }; - __u32 priority; + + /* Following fields are _not_ copied in __copy_skb_header() + * Note that queue_mapping is here mostly to fill a hole. + */ kmemcheck_bitfield_begin(flags1); - __u8 ignore_df:1, - cloned:1, - ip_summed:2, + __u16 queue_mapping; + __u8 cloned:1, nohdr:1, - nfctinfo:3; - __u8 pkt_type:3, fclone:2, - ipvs_property:1, peeked:1, - nf_trace:1; + head_frag:1, + xmit_more:1; + /* one bit hole */ kmemcheck_bitfield_end(flags1); - __be16 protocol; - - void (*destructor)(struct sk_buff *skb); -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - struct nf_conntrack *nfct; -#endif -#ifdef CONFIG_BRIDGE_NETFILTER - struct nf_bridge_info *nf_bridge; -#endif - - int skb_iif; - - __u32 hash; - __be16 vlan_proto; - __u16 vlan_tci; + /* fields enclosed in headers_start/headers_end are copied + * using a single memcpy() in __copy_skb_header() + */ + __u32 headers_start[0]; -#ifdef CONFIG_NET_SCHED - __u16 tc_index; /* traffic control index */ -#ifdef CONFIG_NET_CLS_ACT - __u16 tc_verd; /* traffic control verdict */ -#endif +/* if you move pkt_type around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 #endif +#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) - __u16 queue_mapping; - kmemcheck_bitfield_begin(flags2); -#ifdef CONFIG_IPV6_NDISC_NODETYPE - __u8 ndisc_nodetype:2; -#endif + __u8 __pkt_type_offset[0]; + __u8 pkt_type:3; __u8 pfmemalloc:1; + __u8 ignore_df:1; + __u8 nfctinfo:3; + + __u8 nf_trace:1; + __u8 ip_summed:2; __u8 ooo_okay:1; __u8 l4_hash:1; __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; + __u8 no_fcs:1; - __u8 head_frag:1; - /* Encapsulation protocol and NIC drivers should use - * this flag to indicate to each other if the skb contains - * encapsulated packet or not and maybe use the inner packet - * headers if needed - */ + /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 csum_complete_sw:1; - /* 2/4 bit hole (depending on ndisc_nodetype presence) */ - kmemcheck_bitfield_end(flags2); + __u8 csum_level:2; + __u8 csum_bad:1; + +#ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; +#endif + __u8 ipvs_property:1; + __u8 inner_protocol_type:1; + /* 4 or 6 bit hole */ + +#ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +#ifdef CONFIG_NET_CLS_ACT + __u16 tc_verd; /* traffic control verdict */ +#endif +#endif -#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL union { - unsigned int napi_id; - dma_cookie_t dma_cookie; + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int napi_id; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; @@ -596,13 +628,22 @@ struct sk_buff { __u32 reserved_tailroom; }; - __be16 inner_protocol; + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + __u16 inner_transport_header; __u16 inner_network_header; __u16 inner_mac_header; + + __be16 protocol; __u16 transport_header; __u16 network_header; __u16 mac_header; + + __u32 headers_end[0]; + /* These elements must be at the end, see alloc_skb() for details. 
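A conceptual sketch of what the headers_start/headers_end markers above enable: copying the whole run of header fields with a single memcpy(), roughly as __copy_skb_header() can now do (an illustration, not the actual implementation):

#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>

static void example_copy_header_block(struct sk_buff *new,
                                      const struct sk_buff *old)
{
        memcpy(&new->headers_start, &old->headers_start,
               offsetof(struct sk_buff, headers_end) -
               offsetof(struct sk_buff, headers_start));
}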
*/ sk_buff_data_t tail; sk_buff_data_t end; @@ -734,6 +775,37 @@ static inline struct sk_buff *alloc_skb(unsigned int size, return __alloc_skb(size, priority, 0, NUMA_NO_NODE); } +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask); + +/* Layout of fast clones : [skb1][skb2][fclone_ref] */ +struct sk_buff_fclones { + struct sk_buff skb1; + + struct sk_buff skb2; + + atomic_t fclone_ref; +}; + +/** + * skb_fclone_busy - check if fclone is busy + * @skb: buffer + * + * Returns true is skb is a fast clone, and its clone is not freed. + */ +static inline bool skb_fclone_busy(const struct sk_buff *skb) +{ + const struct sk_buff_fclones *fclones; + + fclones = container_of(skb, struct sk_buff_fclones, skb1); + + return skb->fclone == SKB_FCLONE_ORIG && + fclones->skb2.fclone == SKB_FCLONE_CLONE; +} + static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { @@ -1042,6 +1114,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb) * Drop a reference to the header part of the buffer. This is done * by acquiring a payload reference. You must not read from the header * part of skb->data after this. + * Note : Check if you can use __skb_header_release() instead. */ static inline void skb_header_release(struct sk_buff *skb) { @@ -1051,6 +1124,20 @@ static inline void skb_header_release(struct sk_buff *skb) } /** + * __skb_header_release - release reference to header + * @skb: buffer to operate on + * + * Variant of skb_header_release() assuming skb is private to caller. + * We can avoid one atomic operation. + */ +static inline void __skb_header_release(struct sk_buff *skb) +{ + skb->nohdr = 1; + atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); +} + + +/** * skb_shared - is the buffer shared * @skb: buffer to check * @@ -1675,6 +1762,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len) skb->tail += len; } +#define ENCAP_TYPE_ETHER 0 +#define ENCAP_TYPE_IPPROTO 1 + +static inline void skb_set_inner_protocol(struct sk_buff *skb, + __be16 protocol) +{ + skb->inner_protocol = protocol; + skb->inner_protocol_type = ENCAP_TYPE_ETHER; +} + +static inline void skb_set_inner_ipproto(struct sk_buff *skb, + __u8 ipproto) +{ + skb->inner_ipproto = ipproto; + skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; +} + static inline void skb_reset_inner_headers(struct sk_buff *skb) { skb->inner_mac_header = skb->mac_header; @@ -1860,18 +1964,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) return pskb_may_pull(skb, skb_network_offset(skb) + len); } -static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb) -{ - /* Only continue with checksum unnecessary if device indicated - * it is valid across encapsulation (skb->encapsulation was set). - */ - if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation) - skb->ip_summed = CHECKSUM_NONE; - - skb->encapsulation = 0; - skb->csum_valid = 0; -} - /* * CPUs often take a performance hit when accessing unaligned memory * locations. 
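To show the new inner-protocol markers above in context, a hedged sketch of a tunnel transmit path recording what sits behind the outer headers; the choice of IPPROTO_IPIP and ETH_P_TEB is purely illustrative:

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/skbuff.h>

static void example_mark_inner_headers(struct sk_buff *skb, bool l3_tunnel)
{
        skb_reset_inner_headers(skb);

        if (l3_tunnel)
                /* e.g. an IP-in-IP style tunnel: record the inner IP protocol */
                skb_set_inner_ipproto(skb, IPPROTO_IPIP);
        else
                /* e.g. Ethernet carried in the tunnel payload */
                skb_set_inner_protocol(skb, htons(ETH_P_TEB));
}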
The actual performance hit varies, it can be small if the @@ -2567,20 +2659,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); -static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, - int len, void *buffer) +static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) { - int hlen = skb_headlen(skb); - if (hlen - offset >= len) - return skb->data + offset; + return data + offset; - if (skb_copy_bits(skb, offset, buffer, len) < 0) + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) return NULL; return buffer; } +static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + return __skb_header_pointer(skb, offset, len, skb->data, + skb_headlen(skb), buffer); +} + /** * skb_needs_linearize - check if we need to linearize a given skb * depending on the given device features. @@ -2671,6 +2769,8 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +struct sk_buff *skb_clone_sk(struct sk_buff *skb); + #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING void skb_clone_tx_timestamp(struct sk_buff *skb); @@ -2786,6 +2886,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 0 : __skb_checksum_complete(skb); } +static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level == 0) + skb->ip_summed = CHECKSUM_NONE; + else + skb->csum_level--; + } +} + +static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level < SKB_MAX_CSUM_LEVEL) + skb->csum_level++; + } else if (skb->ip_summed == CHECKSUM_NONE) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + } +} + +static inline void __skb_mark_checksum_bad(struct sk_buff *skb) +{ + /* Mark current checksum as bad (typically called from GRO + * path). In the case that ip_summed is CHECKSUM_NONE + * this must be the first checksum encountered in the packet. + * When ip_summed is CHECKSUM_UNNECESSARY, this is the first + * checksum after the last one validated. For UDP, a zero + * checksum can not be marked as bad. + */ + + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_bad = 1; +} + /* Check if we need to perform checksum complete validation. 
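A small sketch of the raw-buffer form of skb_header_pointer() introduced above, in the style of a flow-dissector caller that may be handed a plain data/hlen pair instead of an skb; the wrapper name and parameters are invented:

#include <linux/ip.h>
#include <linux/skbuff.h>

static const struct iphdr *example_get_iphdr(const struct sk_buff *skb,
                                             int nhoff, void *data, int hlen,
                                             struct iphdr *buf)
{
        /* 'data'/'hlen' may describe a linear buffer rather than skb->data */
        return __skb_header_pointer(skb, nhoff, sizeof(*buf), data, hlen, buf);
}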
* * Returns true if checksum complete is needed, false otherwise @@ -2797,6 +2933,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, { if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { skb->csum_valid = 1; + __skb_decr_checksum_unnecessary(skb); return false; } @@ -2826,6 +2963,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } + } else if (skb->csum_bad) { + /* ip_summed == CHECKSUM_NONE in this case */ + return 1; } skb->csum = psum; @@ -2883,6 +3023,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) #define skb_checksum_simple_validate(skb) \ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) +static inline bool __skb_checksum_convert_check(struct sk_buff *skb) +{ + return (skb->ip_summed == CHECKSUM_NONE && + skb->csum_valid && !skb->csum_bad); +} + +static inline void __skb_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + skb->csum = ~pseudo; + skb->ip_summed = CHECKSUM_COMPLETE; +} + +#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_checksum_convert_check(skb)) \ + __skb_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) @@ -2896,7 +3056,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) atomic_inc(&nfct->use); } #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) { if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) @@ -2914,7 +3074,7 @@ static inline void nf_reset(struct sk_buff *skb) nf_conntrack_put(skb->nfct); skb->nfct = NULL; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(skb->nf_bridge); skb->nf_bridge = NULL; #endif @@ -2928,19 +3088,22 @@ static inline void nf_reset_trace(struct sk_buff *skb) } /* Note: This doesn't put any conntrack and bridge info in dst. 
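A hedged sketch of skb_checksum_try_convert() above: a receive path that has already validated a checksum can opportunistically upgrade ip_summed from CHECKSUM_NONE to CHECKSUM_COMPLETE. The pseudo-header helper below is a stand-in; real callers would pass a protocol-specific pseudo-header computation:

#include <linux/in.h>
#include <linux/skbuff.h>

static __wsum example_compute_pseudo(struct sk_buff *skb, int proto)
{
        /* stand-in only: a real helper returns the pseudo-header sum */
        return (__force __wsum)0;
}

static void example_rx_convert(struct sk_buff *skb, __sum16 check)
{
        skb_checksum_try_convert(skb, IPPROTO_UDP, check,
                                 example_compute_pseudo);
}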
*/ -static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) +static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, + bool copy) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) dst->nfct = src->nfct; nf_conntrack_get(src->nfct); - dst->nfctinfo = src->nfctinfo; + if (copy) + dst->nfctinfo = src->nfctinfo; #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) dst->nf_bridge = src->nf_bridge; nf_bridge_get(src->nf_bridge); #endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) - dst->nf_trace = src->nf_trace; + if (copy) + dst->nf_trace = src->nf_trace; #endif } @@ -2949,10 +3112,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(dst->nfct); #endif -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) nf_bridge_put(dst->nf_bridge); #endif - __nf_copy(dst, src); + __nf_copy(dst, src, true); } #ifdef CONFIG_NETWORK_SECMARK @@ -3137,7 +3300,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); int skb_checksum_setup(struct sk_buff *skb, bool recalculate); -u32 __skb_get_poff(const struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen); /** * skb_head_is_locked - Determine if the skb->head is locked down diff --git a/include/linux/slab.h b/include/linux/slab.h index 1d9abb7d22a0..c265bec6a57d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -158,31 +158,6 @@ size_t ksize(const void *); #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CONFIG_SLOB -/* - * Common fields provided in kmem_cache by all slab allocators - * This struct is either used directly by the allocator (SLOB) - * or the allocator must include definitions for all fields - * provided in kmem_cache_common in their definition of kmem_cache. - * - * Once we can do anonymous structs (C11 standard) we could put a - * anonymous struct definition in these allocators so that the - * separate allocations in the kmem_cache structure of SLAB and - * SLUB is no longer needed. - */ -struct kmem_cache { - unsigned int object_size;/* The original size of the object */ - unsigned int size; /* The aligned/padded/added on size */ - unsigned int align; /* Alignment as calculated */ - unsigned long flags; /* Active flags on the slab */ - const char *name; /* Slab name for sysfs */ - int refcount; /* Use counter */ - void (*ctor)(void *); /* Called on object slot creation */ - struct list_head list; /* List of all slab caches on the system */ -}; - -#endif /* CONFIG_SLOB */ - /* * Kmalloc array related definitions */ @@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -#ifdef CONFIG_SLAB -#include <linux/slab_def.h> -#endif - -#ifdef CONFIG_SLUB -#include <linux/slub_def.h> -#endif - extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); #ifdef CONFIG_TRACING @@ -582,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * allocator where we care about the real place the memory allocation * request comes from. 
*/ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -#else -#define kmalloc_track_caller(size, flags) \ - __kmalloc(size, flags) -#endif /* DEBUG_SLAB */ #ifdef CONFIG_NUMA -/* - * kmalloc_node_track_caller is a special version of kmalloc_node that - * records the calling function of the routine calling it for slab leak - * tracking instead of just the calling function (confusing, eh?). - * It's useful when the call to kmalloc_node comes from a widely-used - * standard allocator where we care about the real place the memory - * allocation request comes from. - */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) -#else -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node(size, flags, node) -#endif #else /* CONFIG_NUMA */ @@ -650,14 +595,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } -/* - * Determine the size of a slab object - */ -static inline unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} - +unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8235dfbb3b05..b869d1662ba3 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -8,6 +8,8 @@ */ struct kmem_cache { + struct array_cache __percpu *cpu_cache; + /* 1) Cache tunables. Protected by slab_mutex */ unsigned int batchcount; unsigned int limit; @@ -71,23 +73,7 @@ struct kmem_cache { struct memcg_cache_params *memcg_params; #endif -/* 6) per-cpu/per-node data, touched during every alloc/free */ - /* - * We put array[] at the end of kmem_cache, because we want to size - * this array to nr_cpu_ids slots instead of NR_CPUS - * (see kmem_cache_init()) - * We still use [NR_CPUS] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of cpus. - * - * We also need to guarantee that the list is able to accomodate a - * pointer for each node since "nodelists" uses the remainder of - * available pointers. - */ - struct kmem_cache_node **node; - struct array_cache *array[NR_CPUS + MAX_NUMNODES]; - /* - * Do not add fields after array[] - */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h new file mode 100644 index 000000000000..dad035c16d94 --- /dev/null +++ b/include/linux/soc/ti/knav_dma.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Sandeep Nair <sandeep_n@ti.com + * Cyril Chemparathy <cyril@ti.com + Santosh Shilimkar <santosh.shilimkar@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
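With the #ifdef maze removed in the slab.h hunk above, kmalloc_track_caller() is available regardless of which slab allocator is configured. A sketch of its typical consumer, a thin wrapper whose allocations should be attributed to its caller rather than to the wrapper itself:

#include <linux/slab.h>

/* Hypothetical wrapper: kmalloc_track_caller() expands to
 * __kmalloc_track_caller(..., _RET_IP_), so leak reports point at the
 * caller of example_alloc(). */
static void *example_alloc(size_t len, gfp_t gfp)
{
        return kmalloc_track_caller(len, gfp);
}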
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ +#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ + +/* + * PKTDMA descriptor manipulation macros for host packet descriptor + */ +#define MASK(x) (BIT(x) - 1) +#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22) +#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0 +#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22) +#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0 +#define KNAV_DMA_DESC_TAG_MASK MASK(8) +#define KNAV_DMA_DESC_SAG_HI_SHIFT 24 +#define KNAV_DMA_DESC_STAG_LO_SHIFT 16 +#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8 +#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0 +#define KNAV_DMA_DESC_HAS_EPIB BIT(31) +#define KNAV_DMA_DESC_NO_EPIB 0 +#define KNAV_DMA_DESC_PSLEN_SHIFT 24 +#define KNAV_DMA_DESC_PSLEN_MASK MASK(6) +#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20 +#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4) +#define KNAV_DMA_DESC_PSFLAG_SHIFT 16 +#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4) +#define KNAV_DMA_DESC_RETQ_SHIFT 0 +#define KNAV_DMA_DESC_RETQ_MASK MASK(14) +#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) + +#define KNAV_DMA_NUM_EPIB_WORDS 4 +#define KNAV_DMA_NUM_PS_WORDS 16 +#define KNAV_DMA_FDQ_PER_CHAN 4 + +/* Tx channel scheduling priority */ +enum knav_dma_tx_priority { + DMA_PRIO_HIGH = 0, + DMA_PRIO_MED_H, + DMA_PRIO_MED_L, + DMA_PRIO_LOW +}; + +/* Rx channel error handling mode during buffer starvation */ +enum knav_dma_rx_err_mode { + DMA_DROP = 0, + DMA_RETRY +}; + +/* Rx flow size threshold configuration */ +enum knav_dma_rx_thresholds { + DMA_THRESH_NONE = 0, + DMA_THRESH_0 = 1, + DMA_THRESH_0_1 = 3, + DMA_THRESH_0_1_2 = 7 +}; + +/* Descriptor type */ +enum knav_dma_desc_type { + DMA_DESC_HOST = 0, + DMA_DESC_MONOLITHIC = 2 +}; + +/** + * struct knav_dma_tx_cfg: Tx channel configuration + * @filt_einfo: Filter extended packet info + * @filt_pswords: Filter PS words present + * @knav_dma_tx_priority: Tx channel scheduling priority + */ +struct knav_dma_tx_cfg { + bool filt_einfo; + bool filt_pswords; + enum knav_dma_tx_priority priority; +}; + +/** + * struct knav_dma_rx_cfg: Rx flow configuration + * @einfo_present: Extended packet info present + * @psinfo_present: PS words present + * @knav_dma_rx_err_mode: Error during buffer starvation + * @knav_dma_desc_type: Host or Monolithic desc + * @psinfo_at_sop: PS word located at start of packet + * @sop_offset: Start of packet offset + * @dst_q: Destination queue for a given flow + * @thresh: Rx flow size threshold + * @fdq[]: Free desc Queue array + * @sz_thresh0: RX packet size threshold 0 + * @sz_thresh1: RX packet size threshold 1 + * @sz_thresh2: RX packet size threshold 2 + */ +struct knav_dma_rx_cfg { + bool einfo_present; + bool psinfo_present; + enum knav_dma_rx_err_mode err_mode; + enum knav_dma_desc_type desc_type; + bool psinfo_at_sop; + unsigned int sop_offset; + unsigned int dst_q; + enum knav_dma_rx_thresholds thresh; + unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN]; + unsigned int sz_thresh0; + unsigned int sz_thresh1; + unsigned int sz_thresh2; +}; + +/** + * struct knav_dma_cfg: Pktdma channel configuration + * @sl_cfg: Slave configuration + * @tx: Tx channel configuration + * @rx: Rx flow configuration + */ +struct knav_dma_cfg { + enum dma_transfer_direction direction; + union { + struct knav_dma_tx_cfg tx; + struct knav_dma_rx_cfg rx; + } u; +}; + +/** + * struct 
knav_dma_desc: Host packet descriptor layout + * @desc_info: Descriptor information like id, type, length + * @tag_info: Flow tag info written in during RX + * @packet_info: Queue Manager, policy, flags etc + * @buff_len: Buffer length in bytes + * @buff: Buffer pointer + * @next_desc: For chaining the descriptors + * @orig_len: length since 'buff_len' can be overwritten + * @orig_buff: buff pointer since 'buff' can be overwritten + * @epib: Extended packet info block + * @psdata: Protocol specific + */ +struct knav_dma_desc { + u32 desc_info; + u32 tag_info; + u32 packet_info; + u32 buff_len; + u32 buff; + u32 next_desc; + u32 orig_len; + u32 orig_buff; + u32 epib[KNAV_DMA_NUM_EPIB_WORDS]; + u32 psdata[KNAV_DMA_NUM_PS_WORDS]; + u32 pad[4]; +} ____cacheline_aligned; + +#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) +void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config); +void knav_dma_close_channel(void *channel); +#else +static inline void *knav_dma_open_channel(struct device *dev, const char *name, + struct knav_dma_cfg *config) +{ + return (void *) NULL; +} +static inline void knav_dma_close_channel(void *channel) +{} + +#endif + +#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h new file mode 100644 index 000000000000..9f0ebb3bad27 --- /dev/null +++ b/include/linux/soc/ti/knav_qmss.h @@ -0,0 +1,90 @@ +/* + * Keystone Navigator Queue Management Sub-System header + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Author: Sandeep Nair <sandeep_n@ti.com> + * Cyril Chemparathy <cyril@ti.com> + * Santosh Shilimkar <santosh.shilimkar@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SOC_TI_KNAV_QMSS_H__ +#define __SOC_TI_KNAV_QMSS_H__ + +#include <linux/err.h> +#include <linux/time.h> +#include <linux/atomic.h> +#include <linux/device.h> +#include <linux/fcntl.h> +#include <linux/dma-mapping.h> + +/* queue types */ +#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */ +#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */ +#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */ + +/* queue flags */ +#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */ + +/** + * enum knav_queue_ctrl_cmd - queue operations. + * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue + * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible + * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle. + * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle. + * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle. + * @KNAV_QUEUE_GET_COUNT: Get number of queues. 
+ */ +enum knav_queue_ctrl_cmd { + KNAV_QUEUE_GET_ID, + KNAV_QUEUE_FLUSH, + KNAV_QUEUE_SET_NOTIFIER, + KNAV_QUEUE_ENABLE_NOTIFY, + KNAV_QUEUE_DISABLE_NOTIFY, + KNAV_QUEUE_GET_COUNT +}; + +/* Queue notifier callback prototype */ +typedef void (*knav_queue_notify_fn)(void *arg); + +/** + * struct knav_queue_notify_config: Notifier configuration + * @fn: Notifier function + * @fn_arg: Notifier function arguments + */ +struct knav_queue_notify_config { + knav_queue_notify_fn fn; + void *fn_arg; +}; + +void *knav_queue_open(const char *name, unsigned id, + unsigned flags); +void knav_queue_close(void *qhandle); +int knav_queue_device_control(void *qhandle, + enum knav_queue_ctrl_cmd cmd, + unsigned long arg); +dma_addr_t knav_queue_pop(void *qhandle, unsigned *size); +int knav_queue_push(void *qhandle, dma_addr_t dma, + unsigned size, unsigned flags); + +void *knav_pool_create(const char *name, + int num_desc, int region_id); +void knav_pool_destroy(void *ph); +int knav_pool_count(void *ph); +void *knav_pool_desc_get(void *ph); +void knav_pool_desc_put(void *ph, void *desc); +int knav_pool_desc_map(void *ph, void *desc, unsigned size, + dma_addr_t *dma, unsigned *dma_sz); +void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz); +dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt); +void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma); + +#endif /* __SOC_TI_KNAV_QMSS_H__ */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h index 2d676d5aaa89..aa07d7b32568 100644 --- a/include/linux/spi/mcp23s08.h +++ b/include/linux/spi/mcp23s08.h @@ -22,4 +22,22 @@ struct mcp23s08_platform_data { * base to base+15 (or base+31 for s17 variant). */ unsigned base; + /* Marks the device as a interrupt controller. + * NOTE: The interrupt functionality is only supported for i2c + * versions of the chips. The spi chips can also do the interrupts, + * but this is not supported by the linux driver yet. + */ + bool irq_controller; + + /* Sets the mirror flag in the IOCON register. Devices + * with two interrupt outputs (these are the devices ending with 17 and + * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and + * IO 8-15 are bank 2. These chips have two different interrupt outputs: + * One for bank 1 and another for bank 2. If irq-mirror is set, both + * interrupts are generated regardless of the bank that an input change + * occurred on. If it is not set, the interrupt are only generated for + * the bank they belong to. + * On devices with only one interrupt output this property is useless. + */ + bool mirror; }; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3f2867ff0ced..262ba4ef9a8e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else -# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) +/* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. 
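A sketch of how the queue and pool calls declared above might combine on a transmit path. The queue and pool handles are assumed to come from knav_queue_open() and knav_pool_create() at probe time, and the descriptor error convention is hedged with IS_ERR_OR_NULL(); none of the names below are from the patch:

#include <linux/soc/ti/knav_qmss.h>

/* Hypothetical TX submit: take a descriptor from a pool, map it for the
 * hardware and push it onto a transmit queue. */
static int example_knav_submit(void *tx_queue, void *desc_pool,
                               unsigned desc_size)
{
        unsigned dma_sz;
        dma_addr_t dma;
        void *desc;

        desc = knav_pool_desc_get(desc_pool);
        if (IS_ERR_OR_NULL(desc))
                return -ENOMEM;

        /* ... fill in the host packet descriptor fields here ... */

        if (knav_pool_desc_map(desc_pool, desc, desc_size, &dma, &dma_sz)) {
                knav_pool_desc_put(desc_pool, desc);
                return -EINVAL;
        }

        return knav_queue_push(tx_queue, dma, dma_sz, 0);
}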
+ */ +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index cf61ecd148e0..21678464883a 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -280,7 +280,6 @@ struct svc_rqst { bool rq_splice_ok; /* turned off in gss privacy * to prevent encrypting page * cache pages */ - wait_queue_head_t rq_wait; /* synchronization */ struct task_struct *rq_task; /* service thread */ }; diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index fcbfe8783243..cf391eef2e6d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -357,6 +357,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable); #define XPRT_CONNECTION_ABORT (7) #define XPRT_CONNECTION_CLOSE (8) #define XPRT_CONGESTED (9) +#define XPRT_CONNECTION_REUSE (10) static inline void xprt_set_connected(struct rpc_xprt *xprt) { diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 519064e0c943..3388c1b6f7d8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -189,6 +189,8 @@ struct platform_suspend_ops { struct platform_freeze_ops { int (*begin)(void); + int (*prepare)(void); + void (*restore)(void); void (*end)(void); }; @@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); extern bool events_check_enabled; extern bool pm_wakeup_pending(void); +extern void pm_system_wakeup(void); +extern void pm_wakeup_clear(void); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); @@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) #define pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } +static inline void pm_system_wakeup(void) {} +static inline void pm_wakeup_clear(void) {} static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} diff --git a/include/linux/swap.h b/include/linux/swap.h index 1b72060f093a..37a585beef5c 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + bool may_swap); extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, struct zone *zone, @@ -354,22 +356,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) extern int page_evictable(struct page *page); extern void check_move_unevictable_pages(struct page **, int nr_pages); -extern unsigned long scan_unevictable_pages; -extern int scan_unevictable_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#ifdef CONFIG_NUMA -extern int scan_unevictable_register_node(struct node *node); -extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} 
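A sketch of the kind of caller the raw_spin_lock_nested() change above is aimed at; with lockdep disabled the subclass expression is now still evaluated, so a value used only for the annotation no longer trips set-but-not-used warnings under W=1. Names are illustrative:

#include <linux/spinlock.h>

/* Hypothetical double-lock helper with a documented lock order. */
static void example_lock_both(raw_spinlock_t *outer, raw_spinlock_t *inner)
{
        raw_spin_lock(outer);
        raw_spin_lock_nested(inner, SINGLE_DEPTH_NESTING);
        /* ... work under both locks ... */
        raw_spin_unlock(inner);
        raw_spin_unlock(outer);
}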
-static inline void scan_unevictable_unregister_node(struct node *node) -{ -} -#endif - extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_MEMCG diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0f86d85a9ce4..bda9b81357cc 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -65,6 +65,7 @@ struct old_linux_dirent; struct perf_event_attr; struct file_handle; struct sigaltstack; +union bpf_attr; #include <linux/types.h> #include <linux/aio_abi.h> @@ -875,5 +876,5 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, const char __user *uargs); asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); - +asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fa5258f322e7..c2dee7deefa8 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -19,7 +19,6 @@ #include <linux/skbuff.h> -#include <linux/dmaengine.h> #include <net/sock.h> #include <net/inet_connection_sock.h> #include <net/inet_timewait_sock.h> @@ -166,13 +165,6 @@ struct tcp_sock { struct iovec *iov; int memory; int len; -#ifdef CONFIG_NET_DMA - /* members for async copy */ - struct dma_chan *dma_chan; - int wakeup; - struct dma_pinned_list *pinned_list; - dma_cookie_t dma_cookie; -#endif } ucopy; u32 snd_wl1; /* Sequence for window update */ @@ -276,7 +268,7 @@ struct tcp_sock { u32 retrans_stamp; /* Timestamp of the last retransmit, * also used in SYN-SENT to remember stamp of * the first SYN. */ - u32 undo_marker; /* tracking retrans started here. */ + u32 undo_marker; /* snd_una upon a new recovery episode. */ int undo_retrans; /* number of undoable retransmissions. */ u32 total_retrans; /* Total retransmits for entire connection */ diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index 932b76392248..884d6263e962 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -268,7 +268,7 @@ struct kim_data_s { struct st_data_s *core_data; struct chip_version version; unsigned char ldisc_install; - unsigned char dev_name[UART_DEV_NAME_LEN]; + unsigned char dev_name[UART_DEV_NAME_LEN + 1]; unsigned char flow_cntrl; unsigned long baud_rate; }; diff --git a/include/linux/tick.h b/include/linux/tick.h index 9a82c7dc3fdd..595ee86f5e0d 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu) return cpumask_test_cpu(cpu, tick_nohz_full_mask); } -extern void tick_nohz_init(void); extern void __tick_nohz_full_check(void); extern void tick_nohz_full_kick(void); extern void tick_nohz_full_kick_cpu(int cpu); extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(struct task_struct *tsk); #else -static inline void tick_nohz_init(void) { } static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void __tick_nohz_full_check(void) { } diff --git a/include/linux/topology.h b/include/linux/topology.h index dda6ee521e74..909b6e43b694 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -119,11 +119,20 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). 
*/ DECLARE_PER_CPU(int, _numa_mem_); +extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); + _node_numa_mem_[numa_node_id()] = node; +} +#endif + +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return _node_numa_mem_[node]; } #endif @@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; + _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -159,6 +169,13 @@ static inline int numa_mem_id(void) } #endif +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return node; +} +#endif + #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { diff --git a/include/linux/torture.h b/include/linux/torture.h index 5ca58fcbaf1b..7759fc3c622d 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -51,7 +51,7 @@ /* Definitions for online/offline exerciser. */ int torture_onoff_init(long ooholdoff, long oointerval); -char *torture_onoff_stats(char *page); +void torture_onoff_stats(void); bool torture_onoff_failures(void); /* Low-rider random number generator. */ @@ -77,7 +77,8 @@ int torture_stutter_init(int s); /* Initialization and cleanup. */ bool torture_init_begin(char *ttype, bool v, int *runnable); void torture_init_end(void); -bool torture_cleanup(void); +bool torture_cleanup_begin(void); +void torture_cleanup_end(void); bool torture_must_stop(void); bool torture_must_stop_irq(void); void torture_kthread_stopping(char *title); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index b1293f15f592..e08e21e5f601 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -157,6 +157,12 @@ extern void syscall_unregfunc(void); * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. + * + * When lockdep is enabled, we make sure to always do the RCU portions of + * the tracepoint code, regardless of whether tracing is on or we match the + * condition. This lets us find RCU issues triggered with tracepoints even + * when this tracepoint is off. This code has no purpose other than poking + * RCU a bit. 
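A sketch of what the node_to_mem_node() addition above enables: redirecting an allocation aimed at a possibly memoryless node to the nearest node that actually has memory. The helper name is illustrative:

#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical memoryless-node-aware allocation helper. */
static void *example_alloc_near(size_t size, gfp_t gfp, int node)
{
        return kmalloc_node(size, gfp, node_to_mem_node(node));
}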
*/ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ @@ -167,6 +173,11 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ + if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + rcu_read_lock_sched_notrace(); \ + rcu_dereference_sched(__tracepoint_##name.funcs);\ + rcu_read_unlock_sched_notrace(); \ + } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 84132942902a..5171ef8f7b85 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -252,6 +252,7 @@ struct tty_struct { struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; spinlock_t ctrl_lock; + spinlock_t flow_lock; /* Termios values are protected by the termios rwsem */ struct ktermios termios, termios_locked; struct termiox *termiox; /* May be NULL for unsupported */ @@ -261,8 +262,13 @@ struct tty_struct { unsigned long flags; int count; struct winsize winsize; /* winsize_mutex */ - unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; - unsigned char ctrl_status; /* ctrl_lock */ + unsigned long stopped:1, /* flow_lock */ + flow_stopped:1, + unused:BITS_PER_LONG - 2; + int hw_stopped; + unsigned long ctrl_status:8, /* ctrl_lock */ + packet:1, + unused_ctrl:BITS_PER_LONG - 9; unsigned int receive_room; /* Bytes free for queue */ int flow_change; @@ -397,7 +403,9 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, extern char *tty_name(struct tty_struct *tty, char *buf); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int tty_check_change(struct tty_struct *tty); +extern void __stop_tty(struct tty_struct *tty); extern void stop_tty(struct tty_struct *tty); +extern void __start_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); extern int tty_register_driver(struct tty_driver *driver); extern int tty_unregister_driver(struct tty_driver *driver); @@ -411,6 +419,7 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, int buflen); extern void tty_write_message(struct tty_struct *tty, char *msg); +extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); extern int tty_chars_in_buffer(struct tty_struct *tty); extern int tty_write_room(struct tty_struct *tty); @@ -495,8 +504,6 @@ extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); extern struct mutex tty_mutex; extern spinlock_t tty_files_lock; -extern void tty_write_unlock(struct tty_struct *tty); -extern int tty_write_lock(struct tty_struct *tty, int ndelay); #define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) extern void tty_port_init(struct tty_port *port); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index e48c608a8fa8..92e337c18839 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -152,6 +152,8 @@ * This routine notifies the tty driver that it should stop * outputting characters to the tty device. * + * Called with ->flow_lock held. Serialized with start() method. + * * Optional: * * Note: Call stop_tty not this method. @@ -161,6 +163,8 @@ * This routine notifies the tty driver that it resume sending * characters to the tty device. * + * Called with ->flow_lock held. 
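A sketch of a caller for the newly exported __stop_tty()/__start_tty(); per the notes above they are intended to run with the new tty->flow_lock held, for example when reacting to XOFF/XON. The function name is illustrative:

#include <linux/tty.h>

/* Hypothetical flow-control path using the lock-free variants. */
static void example_tty_flow(struct tty_struct *tty, bool stop)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->flow_lock, flags);
        if (stop)
                __stop_tty(tty);
        else
                __start_tty(tty);
        spin_unlock_irqrestore(&tty->flow_lock, flags);
}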
Serialized with stop() method. + * * Optional: * * Note: Call start_tty not this method. diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08..ee3277593222 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,7 +49,11 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ + no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ + convert_csum:1;/* On receive, convert checksum + * unnecessary to checksum complete + * if possible. + */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } +static inline void udp_set_convert_csum(struct sock *sk, bool val) +{ + udp_sk(sk)->convert_csum = val; +} + +static inline bool udp_get_convert_csum(struct sock *sk) +{ + return udp_sk(sk)->convert_csum; +} + #define udp_portaddr_for_each_entry(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) diff --git a/include/linux/uio.h b/include/linux/uio.h index 48d64e6ab292..9b1581414cd4 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -80,11 +80,14 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); +size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, - unsigned maxpages, size_t *start); + size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); diff --git a/include/linux/usb.h b/include/linux/usb.h index d2465bc0e73c..447a7e2fc19b 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1862,6 +1862,18 @@ extern void usb_unregister_notify(struct notifier_block *nb); /* debugfs stuff */ extern struct dentry *usb_debug_root; +/* LED triggers */ +enum usb_led_event { + USB_LED_EVENT_HOST = 0, + USB_LED_EVENT_GADGET = 1, +}; + +#ifdef CONFIG_USB_LED_TRIG +extern void usb_led_activity(enum usb_led_event ev); +#else +static inline void usb_led_activity(enum usb_led_event ev) {} +#endif + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index bbe779f640be..e14c09a45c5a 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -31,6 +31,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 void (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; + bool tpl_support; }; /* Default offset of capability registers */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index c3a61853cd13..522cafe26790 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -345,12 +345,13 @@ static inline int usb_ep_queue(struct usb_ep *ep, * @ep:the endpoint 
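A sketch of a tunnel or encapsulation setup path opting a UDP socket into the new convert_csum behaviour via the accessor above, so received CHECKSUM_UNNECESSARY packets can be turned into CHECKSUM_COMPLETE for the inner headers:

#include <linux/udp.h>
#include <net/sock.h>

/* Hypothetical encapsulation socket setup. */
static void example_setup_encap(struct sock *sk)
{
        udp_set_convert_csum(sk, true);
}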
associated with the request * @req:the request being canceled * - * if the request is still active on the endpoint, it is dequeued and its + * If the request is still active on the endpoint, it is dequeued and its * completion routine is called (with status -ECONNRESET); else a negative - * error code is returned. + * error code is returned. This is guaranteed to happen before the call to + * usb_ep_dequeue() returns. * - * note that some hardware can't clear out write fifos (to unlink the request - * at the head of the queue) except as part of disconnecting from usb. such + * Note that some hardware can't clear out write fifos (to unlink the request + * at the head of the queue) except as part of disconnecting from usb. Such * restrictions prevent drivers from supporting configuration changes, * even to configuration zero (a "chapter 9" requirement). */ @@ -816,6 +817,8 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget) * Called in a context that permits sleeping. * @suspend: Invoked on USB suspend. May be called in_interrupt. * @resume: Invoked on USB resume. May be called in_interrupt. + * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers + * and should be called in_interrupt. * @driver: Driver model state for this driver. * * Devices are disabled till a gadget driver successfully bind()s, which @@ -873,6 +876,7 @@ struct usb_gadget_driver { void (*disconnect)(struct usb_gadget *); void (*suspend)(struct usb_gadget *); void (*resume)(struct usb_gadget *); + void (*reset)(struct usb_gadget *); /* FIXME support safe rmmod */ struct device_driver driver; @@ -1013,6 +1017,20 @@ extern void usb_gadget_set_state(struct usb_gadget *gadget, /*-------------------------------------------------------------------------*/ +/* utility to tell udc core that the bus reset occurs */ +extern void usb_gadget_udc_reset(struct usb_gadget *gadget, + struct usb_gadget_driver *driver); + +/*-------------------------------------------------------------------------*/ + +/* utility to give requests back to the gadget layer */ + +extern void usb_gadget_giveback_request(struct usb_ep *ep, + struct usb_request *req); + + +/*-------------------------------------------------------------------------*/ + /* utility wrapping a simple endpoint selection policy */ extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 485cd5e2100c..cd96a2bc3388 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -106,7 +106,8 @@ struct usb_hcd { * OTG and some Host controllers need software interaction with phys; * other external phys should be software-transparent */ - struct usb_phy *phy; + struct usb_phy *usb_phy; + struct phy *phy; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. 
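A sketch of a UDC driver's completion path built on the usb_gadget_giveback_request() helper declared above, rather than invoking req->complete() directly; locking is controller specific and omitted, and the function name is illustrative:

#include <linux/errno.h>
#include <linux/usb/gadget.h>

/* Hypothetical request completion in a UDC driver. */
static void example_udc_complete(struct usb_ep *ep, struct usb_request *req,
                                 int status)
{
        if (req->status == -EINPROGRESS)
                req->status = status;
        usb_gadget_giveback_request(ep, req);
}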
Always use @@ -144,6 +145,7 @@ struct usb_hcd { unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ + unsigned tpl_support:1; /* OTG & EH TPL support */ unsigned int irq; /* irq allocated */ void __iomem *regs; /* device memory/io */ diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 8c38aa26b3bb..cfe0528cdbb1 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -14,6 +14,7 @@ #if IS_ENABLED(CONFIG_OF) enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); +bool of_usb_host_tpl_support(struct device_node *np); #else static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) { @@ -25,6 +26,10 @@ of_usb_get_maximum_speed(struct device_node *np) { return USB_SPEED_UNKNOWN; } +static inline bool of_usb_host_tpl_support(struct device_node *np) +{ + return false; +} #endif #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 55a17b188daa..9948c874e3f1 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -8,27 +8,27 @@ #define __LINUX_USB_QUIRKS_H /* string descriptors must not be fetched using a 255-byte read */ -#define USB_QUIRK_STRING_FETCH_255 0x00000001 +#define USB_QUIRK_STRING_FETCH_255 BIT(0) /* device can't resume correctly so reset it instead */ -#define USB_QUIRK_RESET_RESUME 0x00000002 +#define USB_QUIRK_RESET_RESUME BIT(1) /* device can't handle Set-Interface requests */ -#define USB_QUIRK_NO_SET_INTF 0x00000004 +#define USB_QUIRK_NO_SET_INTF BIT(2) /* device can't handle its Configuration or Interface strings */ -#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 +#define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3) /* device can't be reset(e.g morph devices), don't use reset */ -#define USB_QUIRK_RESET 0x00000010 +#define USB_QUIRK_RESET BIT(4) /* device has more interface descriptions than the bNumInterfaces count, and can't handle talking to these interfaces */ -#define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 +#define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5) /* device needs a pause during initialization, after we read the device descriptor */ -#define USB_QUIRK_DELAY_INIT 0x00000040 +#define USB_QUIRK_DELAY_INIT BIT(6) /* * For high speed and super speed interupt endpoints, the USB 2.0 and @@ -39,6 +39,12 @@ * Devices with this quirk report their bInterval as the result of this * calculation instead of the exponent variable used in the calculation. 
*/ -#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080 +#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7) + +/* device can't handle device_qualifier descriptor requests */ +#define USB_QUIRK_DEVICE_QUALIFIER BIT(8) + +/* device generates spurious wakeup, ignore remote wakeup capability */ +#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 9b7de1b46437..a7f2604c5f25 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -73,6 +73,10 @@ /* Device advertises UAS but it is broken */ \ US_FLAG(BROKEN_FUA, 0x01000000) \ /* Cannot handle FUA in WRITE or READ CDBs */ \ + US_FLAG(NO_ATA_1X, 0x02000000) \ + /* Cannot handle ATA_12 or ATA_16 CDBs */ \ + US_FLAG(NO_REPORT_OPCODES, 0x04000000) \ + /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 502073a53dd3..b483abd34493 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev); void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); +void vga_switcheroo_fini_domain_pm_ops(struct device *dev); int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); #else @@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } +static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } #endif diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 2c02f3a8d2ba..c37bd4d06739 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -182,7 +182,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); * vga_get()... 
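A sketch of checking one of the new quirk bits; the 'quirks' field of struct usb_device is assumed to be where the flags are accumulated, as is usual for USB core quirks, and the helper name is illustrative:

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* Hypothetical check used when deciding whether to arm remote wakeup. */
static bool example_skip_remote_wakeup(struct usb_device *udev)
{
        return !!(udev->quirks & USB_QUIRK_IGNORE_REMOTE_WAKEUP);
}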
*/ -#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE #ifdef CONFIG_VGA_ARB extern struct pci_dev *vga_default_device(void); extern void vga_set_default_device(struct pci_dev *pdev); @@ -190,7 +189,6 @@ extern void vga_set_default_device(struct pci_dev *pdev); static inline struct pci_dev *vga_default_device(void) { return NULL; }; static inline void vga_set_default_device(struct pci_dev *pdev) { }; #endif -#endif /** * vga_conflicts diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ced92345c963..730334cdf037 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif +#ifdef CONFIG_MEMORY_BALLOON + BALLOON_INFLATE, + BALLOON_DEFLATE, +#ifdef CONFIG_BALLOON_COMPACTION + BALLOON_MIGRATE, +#endif +#endif #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 034f6fc7c65f..e4a8eb9312ea 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -25,7 +25,7 @@ struct wait_bit_key { void *flags; int bit_nr; #define WAIT_ATOMIC_T_BIT_NR -1 - unsigned long private; + unsigned long timeout; }; struct wait_bit_queue { @@ -154,6 +154,7 @@ int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_ac void wake_up_bit(void *, int); void wake_up_atomic_t(atomic_t *); int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); +int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long); int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); wait_queue_head_t *bit_waitqueue(void *, int); @@ -863,6 +864,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); extern int bit_wait(struct wait_bit_key *); extern int bit_wait_io(struct wait_bit_key *); +extern int bit_wait_timeout(struct wait_bit_key *); +extern int bit_wait_io_timeout(struct wait_bit_key *); /** * wait_on_bit - wait for a bit to be cleared diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a0cc2e95ed1b..b996e6cde6bb 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -419,7 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ 1, (name)) #define create_singlethread_workqueue(name) \ - alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index e44d634e7fb7..05c214760977 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); -u64 zs_get_total_size_bytes(struct zs_pool *pool); +unsigned long zs_get_total_pages(struct zs_pool *pool); #endif diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h index 852e96c4bb46..984fb79031de 100644 --- a/include/media/davinci/dm644x_ccdc.h +++ b/include/media/davinci/dm644x_ccdc.h @@ -114,7 +114,7 @@ struct ccdc_fault_pixel { /* Number of fault pixel */ unsigned short fp_num; /* Address of fault pixel 
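A sketch of what create_singlethread_workqueue() now expands to in spirit after the redefinition above: an ordered, memory-reclaim-capable workqueue, which new code can request explicitly:

#include <linux/workqueue.h>

/* Hypothetical explicit replacement for a legacy singlethreaded workqueue. */
static struct workqueue_struct *example_create_wq(void)
{
        return alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "example_wq");
}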
table */ - unsigned int fpc_table_addr; + unsigned long fpc_table_addr; }; /* Structure for CCDC configuration parameters for raw capture mode passed diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h index c9d06d9f7e6e..398279dd1922 100644 --- a/include/media/omap3isp.h +++ b/include/media/omap3isp.h @@ -57,6 +57,8 @@ enum { * 0 - Active high, 1 - Active low * @vs_pol: Vertical synchronization polarity * 0 - Active high, 1 - Active low + * @fld_pol: Field signal polarity + * 0 - Positive, 1 - Negative * @data_pol: Data polarity * 0 - Normal, 1 - One's complement */ @@ -65,6 +67,7 @@ struct isp_parallel_platform_data { unsigned int clk_pol:1; unsigned int hs_pol:1; unsigned int vs_pol:1; + unsigned int fld_pol:1; unsigned int data_pol:1; }; diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 80f951890b4c..e7a1514075ec 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -135,6 +135,7 @@ void rc_map_init(void); #define RC_MAP_DM1105_NEC "rc-dm1105-nec" #define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" #define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" +#define RC_MAP_DVBSKY "rc-dvbsky" #define RC_MAP_EMPTY "rc-empty" #define RC_MAP_EM_TERRATEC "rc-em-terratec" #define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2" diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index fc910a622451..6ef2d01197da 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -295,7 +295,7 @@ struct vb2_buffer { * can return an error if hardware fails, in that case all * buffers that have been already given by the @buf_queue * callback are to be returned by the driver by calling - * @vb2_buffer_done(VB2_BUF_STATE_DEQUEUED). + * @vb2_buffer_done(VB2_BUF_STATE_QUEUED). * If you need a minimum number of buffers before you can * start streaming, then set @min_buffers_needed in the * vb2_queue structure. If that is non-zero then @@ -356,8 +356,8 @@ struct v4l2_fh; * @buf_struct_size: size of the driver-specific buffer structure; * "0" indicates the driver doesn't want to use a custom buffer * structure type, so sizeof(struct vb2_buffer) will is used - * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAGS_TIMESTAMP_* and - * V4L2_BUF_FLAGS_TSTAMP_SRC_* + * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and + * V4L2_BUF_FLAG_TSTAMP_SRC_* * @gfp_flags: additional gfp flags used when allocating the buffers. * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32 * to force the buffer allocation to a specific memory zone. @@ -366,6 +366,7 @@ struct v4l2_fh; * cannot be started unless at least this number of buffers * have been queued into the driver. * + * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped * @memory: current memory type used * @bufs: videobuf buffer structures * @num_buffers: number of allocated/used buffers @@ -380,6 +381,9 @@ struct v4l2_fh; * @start_streaming_called: start_streaming() was called successfully and we * started streaming. * @error: a fatal error occurred on the queue + * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for + * buffers. Only set for capture queues if qbuf has not yet been + * called since poll() needs to return POLLERR in that situation. 
* @fileio: file io emulator internal data, used only if emulator is active * @threadio: thread io internal data, used only if thread is active */ @@ -399,6 +403,7 @@ struct vb2_queue { u32 min_buffers_needed; /* private: internal use only */ + struct mutex mmap_lock; enum v4l2_memory memory; struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; unsigned int num_buffers; @@ -417,6 +422,7 @@ struct vb2_queue { unsigned int streaming:1; unsigned int start_streaming_called:1; unsigned int error:1; + unsigned int waiting_for_buffers:1; struct vb2_fileio_data *fileio; struct vb2_threadio_data *threadio; @@ -588,6 +594,15 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) return 0; } +/** + * vb2_start_streaming_called() - return streaming status of driver + * @q: videobuf queue + */ +static inline bool vb2_start_streaming_called(struct vb2_queue *q) +{ + return q->start_streaming_called; +} + /* * The following functions are not part of the vb2 core API, but are simple * helper functions that you can use in your struct v4l2_file_operations, diff --git a/include/misc/cxl.h b/include/misc/cxl.h new file mode 100644 index 000000000000..975cc7861f18 --- /dev/null +++ b/include/misc/cxl.h @@ -0,0 +1,48 @@ +/* + * Copyright 2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _MISC_CXL_H +#define _MISC_CXL_H + +#ifdef CONFIG_CXL_BASE + +#define CXL_IRQ_RANGES 4 + +struct cxl_irq_ranges { + irq_hw_number_t offset[CXL_IRQ_RANGES]; + irq_hw_number_t range[CXL_IRQ_RANGES]; +}; + +extern atomic_t cxl_use_count; + +static inline bool cxl_ctx_in_use(void) +{ + return (atomic_read(&cxl_use_count) != 0); +} + +static inline void cxl_ctx_get(void) +{ + atomic_inc(&cxl_use_count); +} + +static inline void cxl_ctx_put(void) +{ + atomic_dec(&cxl_use_count); +} + +void cxl_slbia(struct mm_struct *mm); + +#else /* CONFIG_CXL_BASE */ + +static inline bool cxl_ctx_in_use(void) { return false; } +static inline void cxl_slbia(struct mm_struct *mm) {} + +#endif /* CONFIG_CXL_BASE */ + +#endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f679877bb601..d13573bb879e 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -202,8 +202,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); void ipv6_sock_ac_close(struct sock *sk); -int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); +void ipv6_ac_destroy_dev(struct inet6_dev *idev); bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, const struct in6_addr *addr); bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, diff --git a/include/net/ah.h b/include/net/ah.h index ca95b98969dd..4e2dfa474a7e 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -3,9 +3,6 @@ #include <linux/skbuff.h> -/* This is the maximum truncated ICV length that we know of. 
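A sketch of a driver buf_queue operation using the vb2_start_streaming_called() helper added above to decide whether a freshly queued buffer can go straight to the hardware or must wait for start_streaming(); the function name is illustrative:

#include <media/videobuf2-core.h>

/* Hypothetical .buf_queue implementation. */
static void example_buf_queue(struct vb2_buffer *vb)
{
        struct vb2_queue *q = vb->vb2_queue;

        if (vb2_start_streaming_called(q)) {
                /* streaming already started: program the DMA engine now */
        } else {
                /* not yet: park the buffer until start_streaming() runs */
        }
}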
*/ -#define MAX_AH_AUTH_LEN 64 - struct crypto_ahash; struct ah_data { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 373000de610d..58695ffeb138 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -120,9 +120,9 @@ struct bt_voice { #define BT_RCVMTU 13 __printf(1, 2) -int bt_info(const char *fmt, ...); +void bt_info(const char *fmt, ...); __printf(1, 2) -int bt_err(const char *fmt, ...); +void bt_err(const char *fmt, ...); #define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) #define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) @@ -284,6 +284,7 @@ struct hci_req_ctrl { struct bt_skb_cb { __u8 pkt_type; __u8 incoming; + __u16 opcode; __u16 expect; __u8 force_active; struct l2cap_chan *chan; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 3f8547f1c6f8..6e8f24967308 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -385,6 +385,7 @@ enum { #define HCI_ERROR_AUTH_FAILURE 0x05 #define HCI_ERROR_MEMORY_EXCEEDED 0x07 #define HCI_ERROR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d #define HCI_ERROR_REJ_BAD_ADDR 0x0f #define HCI_ERROR_REMOTE_USER_TERM 0x13 #define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b5d5af3aa469..37ff1aef0845 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -302,7 +302,7 @@ struct hci_dev { __u32 req_status; __u32 req_result; - struct crypto_blkcipher *tfm_aes; + void *smp_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -464,6 +464,8 @@ struct hci_conn_params { HCI_AUTO_CONN_ALWAYS, HCI_AUTO_CONN_LINK_LOSS, } auto_connect; + + struct hci_conn *conn; }; extern struct list_head hci_dev_list; @@ -537,7 +539,6 @@ enum { HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, HCI_CONN_SCO_SETUP_PEND, - HCI_CONN_LE_SMP_PEND, HCI_CONN_MGMT_CONNECTED, HCI_CONN_SSP_ENABLED, HCI_CONN_SC_ENABLED, @@ -551,6 +552,7 @@ enum { HCI_CONN_FIPS, HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, + HCI_CONN_DROP, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -700,7 +702,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, return NULL; } -void hci_disconnect(struct hci_conn *conn, __u8 reason); +int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); @@ -754,9 +756,10 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status); * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). 
*/ -static inline void hci_conn_get(struct hci_conn *conn) +static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) { get_device(&conn->dev); + return conn; } static inline void hci_conn_put(struct hci_conn *conn) @@ -788,7 +791,7 @@ static inline void hci_conn_drop(struct hci_conn *conn) if (!conn->out) timeo *= 2; } else { - timeo = msecs_to_jiffies(10); + timeo = 0; } break; @@ -797,7 +800,7 @@ static inline void hci_conn_drop(struct hci_conn *conn) break; default: - timeo = msecs_to_jiffies(10); + timeo = 0; break; } @@ -923,7 +926,6 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); -int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); void hci_init_sysfs(struct hci_dev *hdev); @@ -968,6 +970,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) +#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ + !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1256,6 +1261,8 @@ bool hci_req_pending(struct hci_dev *hdev); void hci_req_add_le_scan_disable(struct hci_request *req); void hci_req_add_le_passive_scan(struct hci_request *req); +void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req); + struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout); struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, @@ -1334,8 +1341,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered); -void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status); +void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status); @@ -1351,6 +1357,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); +bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 8df15ad0d43f..ead99f032f7a 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -633,10 +633,11 @@ struct l2cap_conn { struct sk_buff_head pending_rx; struct work_struct pending_rx_work; + struct work_struct id_addr_update_work; + __u8 disc_reason; - struct delayed_work security_timer; - struct smp_chan *smp_chan; + struct l2cap_chan *smp; struct list_head chan_l; struct mutex chan_lock; @@ -708,6 +709,8 @@ enum { FLAG_EFS_ENABLE, FLAG_DEFER_SETUP, FLAG_LE_CONN_REQ_SENT, + 
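A sketch of the calling pattern the hci_conn_get() change above enables: taking the reference and storing the connection in the 'conn' member newly added to struct hci_conn_params in one step. The function name is illustrative:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical caching of a connection on its parameters entry. */
static void example_cache_conn(struct hci_conn_params *params,
                               struct hci_conn *conn)
{
        params->conn = hci_conn_get(conn);
}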
FLAG_PENDING_SECURITY, + FLAG_HOLD_HCI_CONN, }; enum { @@ -837,18 +840,43 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan return NULL; } +static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + return -ENOSYS; +} + +static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + return ERR_PTR(-ENOSYS); +} + static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) { } +static inline void l2cap_chan_no_close(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan, + int state, int err) +{ +} + static inline void l2cap_chan_no_defer(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_resume(struct l2cap_chan *chan) { } @@ -911,14 +939,13 @@ int l2cap_ertm_init(struct l2cap_chan *chan); void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); void l2cap_chan_del(struct l2cap_chan *chan, int err); -void l2cap_conn_update_id_addr(struct hci_conn *hcon); void l2cap_send_conn_req(struct l2cap_chan *chan); void l2cap_move_start(struct l2cap_chan *chan); void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, u8 status); void __l2cap_physical_cfm(struct l2cap_chan *chan, int result); -void l2cap_conn_get(struct l2cap_conn *conn); +struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn); void l2cap_conn_put(struct l2cap_conn *conn); int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0a080c4de275..a2ddcf2398fd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4,6 +4,7 @@ * 802.11 device and configuration interface * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -663,6 +664,7 @@ struct cfg80211_acl_data { * @crypto: crypto settings * @privacy: the BSS uses privacy * @auth_type: Authentication type (algorithm) + * @smps_mode: SMPS mode * @inactivity_timeout: time in seconds to determine station's inactivity. 
* @p2p_ctwindow: P2P CT Window * @p2p_opp_ps: P2P opportunistic PS @@ -681,6 +683,7 @@ struct cfg80211_ap_settings { struct cfg80211_crypto_settings crypto; bool privacy; enum nl80211_auth_type auth_type; + enum nl80211_smps_mode smps_mode; int inactivity_timeout; u8 p2p_ctwindow; bool p2p_opp_ps; @@ -1503,12 +1506,14 @@ enum cfg80211_signal_type { * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing * @len: length of the IEs + * @from_beacon: these IEs are known to come from a beacon * @data: IE data */ struct cfg80211_bss_ies { u64 tsf; struct rcu_head rcu_head; int len; + bool from_beacon; u8 data[]; }; @@ -1605,10 +1610,12 @@ struct cfg80211_auth_request { * * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) * @ASSOC_REQ_DISABLE_VHT: Disable VHT + * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), ASSOC_REQ_DISABLE_VHT = BIT(1), + ASSOC_REQ_USE_RRM = BIT(2), }; /** @@ -1800,6 +1807,7 @@ struct cfg80211_connect_params { * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed + * @WIPHY_PARAM_DYN_ACK: dynack has been enabled */ enum wiphy_params_flags { WIPHY_PARAM_RETRY_SHORT = 1 << 0, @@ -1807,6 +1815,7 @@ enum wiphy_params_flags { WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, + WIPHY_PARAM_DYN_ACK = 1 << 5, }; /* @@ -1973,14 +1982,12 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key - * @kck: key confirmation key - * @replay_ctr: replay counter + * @kek: key encryption key (NL80211_KEK_LEN bytes) + * @kck: key confirmation key (NL80211_KCK_LEN bytes) + * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) */ struct cfg80211_gtk_rekey_data { - u8 kek[NL80211_KEK_LEN]; - u8 kck[NL80211_KCK_LEN]; - u8 replay_ctr[NL80211_REPLAY_CTR_LEN]; + const u8 *kek, *kck, *replay_ctr; }; /** @@ -2313,6 +2320,17 @@ struct cfg80211_qos_map { * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the * given interface This is used e.g. for dynamic HT 20/40 MHz channel width * changes during the lifetime of the BSS. + * + * @add_tx_ts: validate (if admitted_time is 0) or add a TX TS to the device + * with the given parameters; action frame exchange has been handled by + * userspace so this just has to modify the TX path to take the TS into + * account. + * If the admitted time is 0 just validate the parameters to make sure + * the session can be created at all; it is valid to just always return + * success for that but that may result in inefficient behaviour (handshake + * with the peer followed by immediate teardown when the addition is later + * rejected) + * @del_tx_ts: remove an existing TX TS */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2553,6 +2571,12 @@ struct cfg80211_ops { int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_chan_def *chandef); + + int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer, u8 user_prio, + u16 admitted_time); + int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, + u8 tsid, const u8 *peer); }; /* @@ -2599,9 +2623,13 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. 
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). + * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM + * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS + * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it + * needs to be able to handle Block-Ack agreements and other things. */ enum wiphy_flags { - /* use hole at 0 */ + WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0), /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -3765,11 +3793,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, } /** - * cfg80211_inform_bss - inform cfg80211 of a new BSS + * enum cfg80211_bss_frame_type - frame type that the BSS data came from + * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is + * from a beacon or probe response + * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon + * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response + */ +enum cfg80211_bss_frame_type { + CFG80211_BSS_FTYPE_UNKNOWN, + CFG80211_BSS_FTYPE_BEACON, + CFG80211_BSS_FTYPE_PRESP, +}; + +/** + * cfg80211_inform_bss_width - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @rx_channel: The channel the frame was received on * @scan_width: width of the control channel + * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer @@ -3789,6 +3831,7 @@ struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp); @@ -3796,12 +3839,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy, static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { return cfg80211_inform_bss_width(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, + NL80211_BSS_CHAN_WIDTH_20, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, signal, gfp); @@ -3902,6 +3946,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); * moves to cfg80211 in this call * @buf: authentication frame (header + body) * @len: length of the frame data + * @uapsd_queues: bitmap of ACs configured to uapsd. -1 if n/a. * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@ -3910,7 +3955,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); */ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, - const u8 *buf, size_t len); + const u8 *buf, size_t len, + int uapsd_queues); /** * cfg80211_assoc_timeout - notification of timed out association @@ -4412,7 +4458,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags - * @gfp: context flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. 
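With the new enum cfg80211_bss_frame_type argument shown above, a driver reporting a parsed scan result now states whether the IEs came from a beacon or a probe response, or passes CFG80211_BSS_FTYPE_UNKNOWN. A rough sketch of a caller against the updated inline wrapper; the surrounding variables (wiphy, channel, IE buffer, signal in mBm) are placeholders supplied by the driver:

#include <net/cfg80211.h>

static void report_scan_result(struct wiphy *wiphy,
			       struct ieee80211_channel *chan,
			       const u8 *bssid, u64 tsf, u16 capab,
			       u16 beacon_int, const u8 *ies, size_t ies_len,
			       s32 signal_mbm)
{
	struct cfg80211_bss *bss;

	/* These IEs were parsed out of a probe response, so say so; use
	 * CFG80211_BSS_FTYPE_UNKNOWN when the origin cannot be told apart. */
	bss = cfg80211_inform_bss(wiphy, chan, CFG80211_BSS_FTYPE_PRESP,
				  bssid, tsf, capab, beacon_int,
				  ies, ies_len, signal_mbm, GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* drop the reference we got */
}

The extra argument is what lets cfg80211 record from_beacon in struct cfg80211_bss_ies, as added earlier in this hunk.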
@@ -4423,7 +4468,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, u32 flags, gfp_t gfp); + const u8 *buf, size_t len, u32 flags); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame diff --git a/include/net/checksum.h b/include/net/checksum.h index 87cb1903640d..6465bae80a4f 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -122,9 +122,7 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { - __be32 diff[] = { ~from, to }; - - *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum))); + *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to)); } /* Implements RFC 1624 (Incremental Internet Checksum) diff --git a/include/net/codel.h b/include/net/codel.h index fe0eab32ce76..aeee28081245 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t; static inline codel_time_t codel_get_time(void) { - u64 ns = ktime_to_ns(ktime_get()); + u64 ns = ktime_get_ns(); return ns >> CODEL_SHIFT; } diff --git a/include/net/dsa.h b/include/net/dsa.h index 6efce384451e..58ad8c6492db 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -15,6 +15,17 @@ #include <linux/list.h> #include <linux/timer.h> #include <linux/workqueue.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> + +enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_DSA, + DSA_TAG_PROTO_TRAILER, + DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_BRCM, +}; #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 @@ -23,9 +34,15 @@ struct dsa_chip_data { /* * How to access the switch configuration registers. */ - struct device *mii_bus; + struct device *host_dev; int sw_addr; + /* Device tree node pointer for this specific switch chip + * used during switch setup in case additional properties + * and resources needs to be used + */ + struct device_node *of_node; + /* * The names of the switch's ports. Use "cpu" to * designate the switch port that the cpu is connected to, @@ -34,6 +51,7 @@ struct dsa_chip_data { * or any other string to indicate this is a physical port. */ char *port_names[DSA_MAX_PORTS]; + struct device_node *port_dn[DSA_MAX_PORTS]; /* * An array (with nr_chips elements) of which element [a] @@ -59,6 +77,8 @@ struct dsa_platform_data { struct dsa_chip_data *chip; }; +struct packet_type; + struct dsa_switch_tree { /* * Configuration data for the platform device that owns @@ -71,7 +91,11 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; - __be16 tag_protocol; + int (*rcv)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev); + enum dsa_tag_protocol tag_protocol; /* * The switch and port to which the CPU is attached. @@ -110,15 +134,16 @@ struct dsa_switch { struct dsa_switch_driver *drv; /* - * Reference to mii bus to use. + * Reference to host device to use. */ - struct mii_bus *master_mii_bus; + struct device *master_dev; /* * Slave mii_bus and devices for the individual ports. 
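The checksum.h hunk above drops the temporary two-word buffer that csum_replace4() used to feed to csum_partial() and does the one's-complement subtract/add directly, which is the RFC 1624 incremental-update identity. A standalone userspace sketch (plain C, deliberately not the kernel csum_* helpers) showing that updating a folded checksum incrementally matches recomputing it after a 32-bit field changes:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full 16-bit one's-complement checksum over a buffer. */
static uint16_t csum(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *p++;
	if (len)
		sum += *(const uint8_t *)p;
	return (uint16_t)~fold(sum);
}

/* RFC 1624-style incremental update when one 32-bit word is replaced. */
static uint16_t csum_update32(uint16_t old_csum, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~old_csum;	/* undo the final inversion */

	sum += (uint16_t)~(from >> 16);		/* subtract the old word... */
	sum += (uint16_t)~(from & 0xffff);
	sum += to >> 16;			/* ...and add the new one */
	sum += to & 0xffff;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	uint32_t words[4] = { 0x12345678, 0xdeadbeef, 0x00000000, 0xcafef00d };
	uint16_t before = csum(words, sizeof(words));
	uint32_t from = words[1], to = 0x0a000001;

	words[1] = to;
	printf("recomputed: %04x  incremental: %04x\n",
	       csum(words, sizeof(words)), csum_update32(before, from, to));
	return 0;
}

For this data both paths print the same value; the kernel change performs the same arithmetic with csum_sub()/csum_add() and no scratch buffer.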
*/ u32 dsa_port_mask; u32 phys_port_mask; + u32 phys_mii_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[DSA_MAX_PORTS]; }; @@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) struct dsa_switch_driver { struct list_head list; - __be16 tag_protocol; + enum dsa_tag_protocol tag_protocol; int priv_size; /* * Probing and setup. */ - char *(*probe)(struct mii_bus *bus, int sw_addr); + char *(*probe)(struct device *host_dev, int sw_addr); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); + u32 (*get_phy_flags)(struct dsa_switch *ds, int port); /* * Access to the switch's PHY registers. @@ -170,37 +196,64 @@ struct dsa_switch_driver { void (*poll_link)(struct dsa_switch *ds); /* + * Link state adjustment (called from libphy) + */ + void (*adjust_link)(struct dsa_switch *ds, int port, + struct phy_device *phydev); + void (*fixed_link_update)(struct dsa_switch *ds, int port, + struct fixed_phy_status *st); + + /* * ethtool hardware statistics. */ void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); int (*get_sset_count)(struct dsa_switch *ds); + + /* + * ethtool Wake-on-LAN + */ + void (*get_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + int (*set_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + + /* + * Suspend and resume + */ + int (*suspend)(struct dsa_switch *ds); + int (*resume)(struct dsa_switch *ds); + + /* + * Port enable/disable + */ + int (*port_enable)(struct dsa_switch *ds, int port, + struct phy_device *phy); + void (*port_disable)(struct dsa_switch *ds, int port, + struct phy_device *phy); + + /* + * EEE setttings + */ + int (*set_eee)(struct dsa_switch *ds, int port, + struct phy_device *phydev, + struct ethtool_eee *e); + int (*get_eee)(struct dsa_switch *ds, int port, + struct ethtool_eee *e); }; void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline void *ds_to_priv(struct dsa_switch *ds) { return (void *)(ds + 1); } -/* - * The original DSA tag format and some other tag formats have no - * ethertype, which means that we need to add a little hack to the - * networking receive path to make sure that received frames get - * the right ->protocol assigned to them when one of those tag - * formats is in use. - */ -static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) -{ - return !!(dst->tag_protocol == htons(ETH_P_DSA)); -} - -static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) +static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { - return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); + return dst->rcv != NULL; } - #endif diff --git a/include/net/dst.h b/include/net/dst.h index 71c60f42be48..a8ae4e760778 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -480,6 +480,7 @@ void dst_init(void); /* Flags for xfrm_lookup flags argument. 
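dsa.h now keys drivers off enum dsa_tag_protocol instead of a raw ethertype, and probe() receives a generic host struct device rather than an mii_bus. A skeletal and deliberately incomplete registration against the new interface, as a sketch only; a real driver would also fill in the PHY access ops and do actual register I/O in probe()/setup():

#include <linux/module.h>
#include <net/dsa.h>

static char *example_dsa_probe(struct device *host_dev, int sw_addr)
{
	/* The host device is no longer assumed to be an MDIO bus; convert
	 * explicitly when the switch really is managed over MDIO. */
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);

	if (!bus)
		return NULL;
	/* Read an ID register at sw_addr here and return a name on match. */
	return "example-switch";
}

static int example_dsa_setup(struct dsa_switch *ds)
{
	/* Configure CPU/DSA ports, reset the chip, etc. */
	return 0;
}

static int example_dsa_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}

static struct dsa_switch_driver example_dsa_driver = {
	.tag_protocol	= DSA_TAG_PROTO_TRAILER,	/* was htons(ETH_P_TRAILER) */
	.probe		= example_dsa_probe,
	.setup		= example_dsa_setup,
	.set_addr	= example_dsa_set_addr,
};

static int __init example_dsa_init(void)
{
	register_switch_driver(&example_dsa_driver);
	return 0;
}
module_init(example_dsa_init);

static void __exit example_dsa_exit(void)
{
	unregister_switch_driver(&example_dsa_driver);
}
module_exit(example_dsa_exit);

MODULE_LICENSE("GPL");

Moving from the __be16 ethertype to an enum is what makes room for tag formats such as DSA_TAG_PROTO_BRCM that have no ethertype of their own, which is also why the old dsa_uses_dsa_tags()/dsa_uses_trailer_tags() helpers collapse into dsa_uses_tagged_protocol().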
*/ enum { XFRM_LOOKUP_ICMP = 1 << 0, + XFRM_LOOKUP_QUEUE = 1 << 1, }; struct flowi; @@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, int flags) { return dst_orig; -} +} + +static inline struct dst_entry *xfrm_lookup_route(struct net *net, + struct dst_entry *dst_orig, + const struct flowi *fl, + struct sock *sk, + int flags) +{ + return dst_orig; +} static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) { @@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, const struct flowi *fl, struct sock *sk, int flags); +struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, struct sock *sk, + int flags); + /* skb attached with this dst needs transformation if dst->xfrm is valid */ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) { diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 2f26dfb8450e..1f99a1de0e4f 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -63,7 +63,7 @@ static inline void dst_entries_add(struct dst_ops *dst, int val) static inline int dst_entries_init(struct dst_ops *dst) { - return percpu_counter_init(&dst->pcpuc_entries, 0); + return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL); } static inline void dst_entries_destroy(struct dst_ops *dst) diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 6667a054763a..7ee2df083542 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -27,7 +27,19 @@ struct flow_keys { u8 ip_proto; }; -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); -__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto); +bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, + void *data, __be16 proto, int nhoff, int hlen); +static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +{ + return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0); +} +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); +static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); +} u32 flow_hash_from_keys(struct flow_keys *keys); +unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len, + __be16 protocol); #endif diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index ea4271dceff0..cbafa3768d48 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -6,6 +6,11 @@ #include <linux/rtnetlink.h> #include <linux/pkt_sched.h> +struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; + struct u64_stats_sync syncp; +}; + struct gnet_dump { spinlock_t * lock; struct sk_buff * skb; @@ -27,21 +32,29 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d); int gnet_stats_copy_basic(struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); +void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); int gnet_stats_copy_rate_est(struct gnet_dump *d, const struct gnet_stats_basic_packed *b, struct gnet_stats_rate_est64 *r); -int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q); +int gnet_stats_copy_queue(struct gnet_dump *d, + struct gnet_stats_queue __percpu *cpu_q, + 
struct gnet_stats_queue *q, __u32 qlen); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_finish_copy(struct gnet_dump *d); int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est64 *rate_est); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, struct nlattr *opt); bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 93695f0e22a5..af10c2cf8a1d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -394,4 +394,12 @@ static inline int genl_set_err(struct genl_family *family, struct net *net, return netlink_set_err(net->genl_sock, portid, group, code); } +static inline int genl_has_listeners(struct genl_family *family, + struct sock *sk, unsigned int group) +{ + if (WARN_ON_ONCE(group >= family->n_mcgrps)) + return -EINVAL; + group = family->mcgrp_offset + group; + return netlink_has_listeners(sk, group); +} #endif /* __NET_GENERIC_NETLINK_H */ diff --git a/include/net/geneve.h b/include/net/geneve.h new file mode 100644 index 000000000000..112132cf8e2e --- /dev/null +++ b/include/net/geneve.h @@ -0,0 +1,97 @@ +#ifndef __NET_GENEVE_H +#define __NET_GENEVE_H 1 + +#ifdef CONFIG_INET +#include <net/udp_tunnel.h> +#endif + + +/* Geneve Header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |Ver| Opt Len |O|C| Rsvd. | Protocol Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Virtual Network Identifier (VNI) | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Variable Length Options | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Option Header: + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Option Class | Type |R|R|R| Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Variable Option Data | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + +struct geneve_opt { + __be16 opt_class; + u8 type; +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 length:5; + u8 r3:1; + u8 r2:1; + u8 r1:1; +#else + u8 r1:1; + u8 r2:1; + u8 r3:1; + u8 length:5; +#endif + u8 opt_data[]; +}; + +#define GENEVE_CRIT_OPT_TYPE (1 << 7) + +struct genevehdr { +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 opt_len:6; + u8 ver:2; + u8 rsvd1:6; + u8 critical:1; + u8 oam:1; +#else + u8 ver:2; + u8 opt_len:6; + u8 oam:1; + u8 critical:1; + u8 rsvd1:6; +#endif + __be16 proto_type; + u8 vni[3]; + u8 rsvd2; + struct geneve_opt options[]; +}; + +#ifdef CONFIG_INET +struct geneve_sock; + +typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb); + +struct geneve_sock { + struct hlist_node hlist; + geneve_rcv_t *rcv; + void *rcv_data; + struct work_struct del_work; + struct socket *sock; + struct rcu_head rcu; + atomic_t refcnt; + struct udp_offload udp_offloads; +}; + +#define GENEVE_VER 0 +#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) + +struct geneve_sock *geneve_sock_add(struct net *net, __be16 port, + geneve_rcv_t *rcv, void *data, + bool no_share, bool ipv6); + +void geneve_sock_release(struct geneve_sock 
*vs); + +int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, + __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, + __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt, + bool xnet); +#endif /*ifdef CONFIG_INET */ + +#endif /*ifdef__NET_GENEVE_H */ diff --git a/include/net/gue.h b/include/net/gue.h new file mode 100644 index 000000000000..b6c332788084 --- /dev/null +++ b/include/net/gue.h @@ -0,0 +1,23 @@ +#ifndef __NET_GUE_H +#define __NET_GUE_H + +struct guehdr { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hlen:4, + version:4; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u8 version:4, + hlen:4; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 next_hdr; + __u16 flags; + }; + __u32 word; + }; +}; + +#endif diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index d07b1a64b4e7..55a8d4056cc9 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -35,7 +35,6 @@ enum { INET6_IFADDR_STATE_DAD, INET6_IFADDR_STATE_POSTDAD, INET6_IFADDR_STATE_ERRDAD, - INET6_IFADDR_STATE_UP, INET6_IFADDR_STATE_DEAD, }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5fbe6568c3cf..848e85cb5c61 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, #endif } +static inline unsigned long +inet_csk_rto_backoff(const struct inet_connection_sock *icsk, + unsigned long max_when) +{ + u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; + + return (unsigned long)min_t(u64, when, max_when); +} + struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); struct request_sock *inet_csk_search_req(const struct sock *sk, diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 65a8855e99fe..8d1765577acc 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -151,7 +151,7 @@ static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) static inline void init_frag_mem_limit(struct netns_frags *nf) { - percpu_counter_init(&nf->mem, 0); + percpu_counter_init(&nf->mem, 0, GFP_KERNEL); } static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 01d590ee5e7e..80479abddf73 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -61,7 +61,6 @@ struct inet_peer { struct inet_peer_base { struct inet_peer __rcu *root; seqlock_t lock; - u32 flush_seq; int total; }; diff --git a/include/net/ip.h b/include/net/ip.h index db4a771b9ef3..0bb620702929 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -180,8 +180,10 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? 
FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, - __be32 saddr, const struct ip_reply_arg *arg, +void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, + const struct ip_options *sopt, + __be32 daddr, __be32 saddr, + const struct ip_reply_arg *arg, unsigned int len); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) @@ -229,8 +231,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif -extern int sysctl_ip_nonlocal_bind; - /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; @@ -364,6 +364,14 @@ static inline void inet_set_txhash(struct sock *sk) sk->sk_txhash = flow_hash_from_keys(&keys); } +static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct iphdr *iph = skb_gro_network_header(skb); + + return csum_tcpudp_nofold(iph->saddr, iph->daddr, + skb_gro_len(skb), proto, 0); +} + /* * Map a multicast IP onto multicast MAC for type ethernet. */ @@ -505,7 +513,14 @@ int ip_forward(struct sk_buff *skb); void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); -int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); + +int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, + const struct ip_options *sopt); +static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) +{ + return __ip_options_echo(dopt, skb, &IPCB(skb)->opt); +} + void ip_options_fragment(struct sk_buff *skb); int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); @@ -542,6 +557,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, u32 info); +bool icmp_global_allow(void); +extern int sysctl_icmp_msgs_per_sec; +extern int sysctl_icmp_msgs_burst; + #ifdef CONFIG_PROC_FS int ip_misc_proc_init(void); #endif diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 55236cb71174..1a49b73f7f6e 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0)); } +static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct ipv6hdr *iph = skb_gro_network_header(skb); + + return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, + skb_gro_len(skb), proto, 0)); +} + static __inline__ __sum16 tcp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 9bcb220bd4ad..8eea35d32a75 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -64,7 +64,7 @@ struct fib6_node { __u16 fn_bit; /* bit key */ __u16 fn_flags; - __u32 fn_sernum; + int fn_sernum; struct rt6_info *rr_ptr; }; @@ -114,16 +114,13 @@ struct rt6_info { u32 rt6i_flags; struct rt6key rt6i_src; struct rt6key rt6i_prefsrc; - u32 rt6i_metric; struct inet6_dev *rt6i_idev; unsigned long _rt6i_peer; - u32 rt6i_genid; - + u32 rt6i_metric; /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; - u8 rt6i_protocol; }; @@ -205,15 +202,25 @@ static inline void ip6_rt_put(struct rt6_info *rt) dst_release(&rt->dst); } -struct fib6_walker_t { +enum fib6_walk_state { +#ifdef CONFIG_IPV6_SUBTREES + FWS_S, +#endif + FWS_L, + FWS_R, + FWS_C, + FWS_U +}; + +struct fib6_walker { struct 
list_head lh; struct fib6_node *root, *node; struct rt6_info *leaf; - unsigned char state; - unsigned char prune; + enum fib6_walk_state state; + bool prune; unsigned int skip; unsigned int count; - int (*func)(struct fib6_walker_t *); + int (*func)(struct fib6_walker *); void *args; }; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9922093f575e..dc9d2a27c315 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -65,7 +65,8 @@ struct fnhe_hash_bucket { struct fib_nh_exception __rcu *chain; }; -#define FNHE_HASH_SIZE 2048 +#define FNHE_HASH_SHIFT 11 +#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT) #define FNHE_RECLAIM_DEPTH 5 struct fib_nh { @@ -87,7 +88,7 @@ struct fib_nh { int nh_saddr_genid; struct rtable __rcu * __percpu *nh_pcpu_rth_output; struct rtable __rcu *nh_rth_input; - struct fnhe_hash_bucket *nh_exceptions; + struct fnhe_hash_bucket __rcu *nh_exceptions; }; /* diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 8dd8cab88b87..5bc6edeb7143 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -10,6 +10,7 @@ #include <net/gro_cells.h> #include <net/inet_ecn.h> #include <net/ip.h> +#include <net/netns/generic.h> #include <net/rtnetlink.h> #if IS_ENABLED(CONFIG_IPV6) @@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm { }; #endif +struct ip_tunnel_encap { + __u16 type; + __u16 flags; + __be16 sport; + __be16 dport; +}; + struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry __rcu *next; __be32 addr; @@ -56,13 +64,18 @@ struct ip_tunnel { /* These four fields used only by GRE */ __u32 i_seqno; /* The last seen seqno */ __u32 o_seqno; /* The last output seqno */ - int hlen; /* Precalculated header length */ + int tun_hlen; /* Precalculated header length */ int mlink; struct ip_tunnel_dst __percpu *dst_cache; struct ip_tunnel_parm parms; + int encap_hlen; /* Encap header length (FOU,GUE) */ + struct ip_tunnel_encap encap; + + int hlen; /* tun_hlen + encap_hlen */ + /* for SIT */ #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd_parm ip6rd; @@ -73,15 +86,18 @@ struct ip_tunnel { struct gro_cells gro_cells; }; -#define TUNNEL_CSUM __cpu_to_be16(0x01) -#define TUNNEL_ROUTING __cpu_to_be16(0x02) -#define TUNNEL_KEY __cpu_to_be16(0x04) -#define TUNNEL_SEQ __cpu_to_be16(0x08) -#define TUNNEL_STRICT __cpu_to_be16(0x10) -#define TUNNEL_REC __cpu_to_be16(0x20) -#define TUNNEL_VERSION __cpu_to_be16(0x40) -#define TUNNEL_NO_KEY __cpu_to_be16(0x80) +#define TUNNEL_CSUM __cpu_to_be16(0x01) +#define TUNNEL_ROUTING __cpu_to_be16(0x02) +#define TUNNEL_KEY __cpu_to_be16(0x04) +#define TUNNEL_SEQ __cpu_to_be16(0x08) +#define TUNNEL_STRICT __cpu_to_be16(0x10) +#define TUNNEL_REC __cpu_to_be16(0x20) +#define TUNNEL_VERSION __cpu_to_be16(0x40) +#define TUNNEL_NO_KEY __cpu_to_be16(0x80) #define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) +#define TUNNEL_OAM __cpu_to_be16(0x0200) +#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) +#define TUNNEL_OPTIONS_PRESENT __cpu_to_be16(0x0800) struct tnl_ptk_info { __be16 flags; @@ -114,6 +130,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, @@ -131,6 +149,8 
@@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); void ip_tunnel_dst_reset_all(struct ip_tunnel *t); +int ip_tunnel_encap_setup(struct ip_tunnel *t, + struct ip_tunnel_encap *ipencap); /* Extract dsfield from inner protocol */ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 624a8a54806d..615b20b58545 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1,6 +1,5 @@ -/* - * IP Virtual Server - * data structure and functionality definitions +/* IP Virtual Server + * data structure and functionality definitions */ #ifndef _NET_IP_VS_H @@ -12,7 +11,7 @@ #include <linux/list.h> /* for struct list_head */ #include <linux/spinlock.h> /* for struct rwlock_t */ -#include <linux/atomic.h> /* for struct atomic_t */ +#include <linux/atomic.h> /* for struct atomic_t */ #include <linux/compiler.h> #include <linux/timer.h> #include <linux/bug.h> @@ -30,15 +29,13 @@ #endif #include <net/net_namespace.h> /* Netw namespace */ -/* - * Generic access of ipvs struct - */ +/* Generic access of ipvs struct */ static inline struct netns_ipvs *net_ipvs(struct net* net) { return net->ipvs; } -/* - * Get net ptr from skb in traffic cases + +/* Get net ptr from skb in traffic cases * use skb_sknet when call is from userland (ioctl or netlink) */ static inline struct net *skb_net(const struct sk_buff *skb) @@ -90,8 +87,8 @@ static inline struct net *skb_sknet(const struct sk_buff *skb) return &init_net; #endif } -/* - * This one needed for single_open_net since net is stored directly in + +/* This one needed for single_open_net since net is stored directly in * private not as a struct i.e. seq_file_net can't be used. */ static inline struct net *seq_file_single_net(struct seq_file *seq) @@ -108,7 +105,7 @@ extern int ip_vs_conn_tab_size; struct ip_vs_iphdr { __u32 len; /* IPv4 simply where L4 starts - IPv6 where L4 Transport Header starts */ + * IPv6 where L4 Transport Header starts */ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ __s16 protocol; __s32 flags; @@ -304,16 +301,11 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, #define LeaveFunction(level) do {} while (0) #endif - -/* - * The port number of FTP service (in network order). - */ +/* The port number of FTP service (in network order). */ #define FTPPORT cpu_to_be16(21) #define FTPDATA cpu_to_be16(20) -/* - * TCP State Values - */ +/* TCP State Values */ enum { IP_VS_TCP_S_NONE = 0, IP_VS_TCP_S_ESTABLISHED, @@ -329,25 +321,19 @@ enum { IP_VS_TCP_S_LAST }; -/* - * UDP State Values - */ +/* UDP State Values */ enum { IP_VS_UDP_S_NORMAL, IP_VS_UDP_S_LAST, }; -/* - * ICMP State Values - */ +/* ICMP State Values */ enum { IP_VS_ICMP_S_NORMAL, IP_VS_ICMP_S_LAST, }; -/* - * SCTP State Values - */ +/* SCTP State Values */ enum ip_vs_sctp_states { IP_VS_SCTP_S_NONE, IP_VS_SCTP_S_INIT1, @@ -366,21 +352,18 @@ enum ip_vs_sctp_states { IP_VS_SCTP_S_LAST }; -/* - * Delta sequence info structure - * Each ip_vs_conn has 2 (output AND input seq. changes). - * Only used in the VS/NAT. +/* Delta sequence info structure + * Each ip_vs_conn has 2 (output AND input seq. changes). + * Only used in the VS/NAT. 
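The ip_tunnels.h changes just above split the precalculated header length into tun_hlen (the tunnel's own header) and encap_hlen (the new FOU/GUE UDP encapsulation set up via ip_tunnel_encap_setup()), with hlen keeping the sum. Back-of-the-envelope arithmetic for a GRE tunnel, as a runnable sketch; the 4-byte GRE base header and 4-byte GUE header are the no-option cases:

#include <stdio.h>

int main(void)
{
	int tun_hlen = 4;	/* base GRE header: 2 bytes flags + 2 bytes protocol */
	int udp_hlen = 8;	/* struct udphdr */
	int gue_hlen = 4;	/* struct guehdr from the new <net/gue.h> above */

	printf("plain GRE:    hlen = %d\n", tun_hlen);
	printf("GRE over FOU: hlen = %d\n", tun_hlen + udp_hlen);
	printf("GRE over GUE: hlen = %d\n", tun_hlen + udp_hlen + gue_hlen);
	return 0;
}

Keeping the two components separate lets ip_tunnel_encap() account for the UDP wrapper without each tunnel type re-deriving its own header size.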
*/ struct ip_vs_seq { __u32 init_seq; /* Add delta from this seq */ __u32 delta; /* Delta in sequence numbers */ __u32 previous_delta; /* Delta in sequence numbers - before last resized pkt */ + * before last resized pkt */ }; -/* - * counters per cpu - */ +/* counters per cpu */ struct ip_vs_counters { __u32 conns; /* connections scheduled */ __u32 inpkts; /* incoming packets */ @@ -388,17 +371,13 @@ struct ip_vs_counters { __u64 inbytes; /* incoming bytes */ __u64 outbytes; /* outgoing bytes */ }; -/* - * Stats per cpu - */ +/* Stats per cpu */ struct ip_vs_cpu_stats { struct ip_vs_counters ustats; struct u64_stats_sync syncp; }; -/* - * IPVS statistics objects - */ +/* IPVS statistics objects */ struct ip_vs_estimator { struct list_head list; @@ -491,9 +470,7 @@ struct ip_vs_protocol { void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); }; -/* - * protocol data per netns - */ +/* protocol data per netns */ struct ip_vs_proto_data { struct ip_vs_proto_data *next; struct ip_vs_protocol *pp; @@ -520,9 +497,7 @@ struct ip_vs_conn_param { __u8 pe_data_len; }; -/* - * IP_VS structure allocated for each dynamically scheduled connection - */ +/* IP_VS structure allocated for each dynamically scheduled connection */ struct ip_vs_conn { struct hlist_node c_list; /* hashed list heads */ /* Protocol, addresses and port numbers */ @@ -535,6 +510,7 @@ struct ip_vs_conn { union nf_inet_addr daddr; /* destination address */ volatile __u32 flags; /* status flags */ __u16 protocol; /* Which protocol (TCP/UDP) */ + __u16 daf; /* Address family of the dest */ #ifdef CONFIG_NET_NS struct net *net; /* Name space */ #endif @@ -560,17 +536,18 @@ struct ip_vs_conn { struct ip_vs_dest *dest; /* real server */ atomic_t in_pkts; /* incoming packet counter */ - /* packet transmitter for different forwarding methods. If it - mangles the packet, it must return NF_DROP or better NF_STOLEN, - otherwise this must be changed to a sk_buff **. - NF_ACCEPT can be returned when destination is local. + /* Packet transmitter for different forwarding methods. If it + * mangles the packet, it must return NF_DROP or better NF_STOLEN, + * otherwise this must be changed to a sk_buff **. + * NF_ACCEPT can be returned when destination is local. */ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); /* Note: we can group the following members into a structure, - in order to save more space, and the following members are - only used in VS/NAT anyway */ + * in order to save more space, and the following members are + * only used in VS/NAT anyway + */ struct ip_vs_app *app; /* bound ip_vs_app object */ void *app_data; /* Application private data */ struct ip_vs_seq in_seq; /* incoming seq. struct */ @@ -583,9 +560,7 @@ struct ip_vs_conn { struct rcu_head rcu_head; }; -/* - * To save some memory in conn table when name space is disabled. - */ +/* To save some memory in conn table when name space is disabled. */ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) { #ifdef CONFIG_NET_NS @@ -594,6 +569,7 @@ static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) return &init_net; #endif } + static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) { #ifdef CONFIG_NET_NS @@ -611,13 +587,12 @@ static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, #endif } -/* - * Extended internal versions of struct ip_vs_service_user and - * ip_vs_dest_user for IPv6 support. 
+/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user + * for IPv6 support. * - * We need these to conveniently pass around service and destination - * options, but unfortunately, we also need to keep the old definitions to - * maintain userspace backwards compatibility for the setsockopt interface. + * We need these to conveniently pass around service and destination + * options, but unfortunately, we also need to keep the old definitions to + * maintain userspace backwards compatibility for the setsockopt interface. */ struct ip_vs_service_user_kern { /* virtual service addresses */ @@ -648,12 +623,15 @@ struct ip_vs_dest_user_kern { /* thresholds for active connections */ u32 u_threshold; /* upper threshold */ u32 l_threshold; /* lower threshold */ + + /* Address family of addr */ + u16 af; }; /* - * The information about the virtual service offered to the net - * and the forwarding entries + * The information about the virtual service offered to the net and the + * forwarding entries. */ struct ip_vs_service { struct hlist_node s_list; /* for normal service table */ @@ -693,9 +671,8 @@ struct ip_vs_dest_dst { struct rcu_head rcu_head; }; -/* - * The real server destination forwarding entry - * with ip address, port number, and so on. +/* The real server destination forwarding entry with ip address, port number, + * and so on. */ struct ip_vs_dest { struct list_head n_list; /* for the dests in the service */ @@ -734,10 +711,7 @@ struct ip_vs_dest { unsigned int in_rs_table:1; /* we are in rs_table */ }; - -/* - * The scheduler object - */ +/* The scheduler object */ struct ip_vs_scheduler { struct list_head n_list; /* d-linked list head */ char *name; /* scheduler name */ @@ -777,9 +751,7 @@ struct ip_vs_pe { int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); }; -/* - * The application module object (a.k.a. app incarnation) - */ +/* The application module object (a.k.a. app incarnation) */ struct ip_vs_app { struct list_head a_list; /* member in app list */ int type; /* IP_VS_APP_TYPE_xxx */ @@ -795,16 +767,14 @@ struct ip_vs_app { atomic_t usecnt; /* usage counter */ struct rcu_head rcu_head; - /* - * output hook: Process packet in inout direction, diff set for TCP. + /* output hook: Process packet in inout direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff); - /* - * input hook: Process packet in outin direction, diff set for TCP. + /* input hook: Process packet in outin direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ @@ -863,9 +833,7 @@ struct ipvs_master_sync_state { struct netns_ipvs { int gen; /* Generation */ int enable; /* enable like nf_hooks do */ - /* - * Hash table: for real service lookups - */ + /* Hash table: for real service lookups */ #define IP_VS_RTAB_BITS 4 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) @@ -899,7 +867,7 @@ struct netns_ipvs { struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; #endif /* ip_vs_conn */ - atomic_t conn_count; /* connection counter */ + atomic_t conn_count; /* connection counter */ /* ip_vs_ctl */ struct ip_vs_stats tot_stats; /* Statistics & est. 
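The two small field additions in this area (daf in struct ip_vs_conn just above, af in struct ip_vs_dest_user_kern here) are what allow a real server's address family to differ from the virtual service's. A hedged sketch of filling in such a destination description; the values are invented, and the members other than af are assumed from the existing struct layout since only the thresholds and the new af appear in this hunk:

#include <linux/string.h>
#include <net/ip_vs.h>

static void fill_v6_dest(struct ip_vs_dest_user_kern *udest,
			 const struct in6_addr *rs_addr)
{
	memset(udest, 0, sizeof(*udest));
	udest->af = AF_INET6;			/* real-server family, new member */
	udest->addr.in6 = *rs_addr;
	udest->port = htons(8080);
	udest->conn_flags = IP_VS_CONN_F_TUNNEL;	/* mixed-AF forwarding implies tunneling */
	udest->weight = 1;
}

An AF_INET virtual service can then be backed by this AF_INET6 destination, which is what the mixed_address_family_dests counter added further down keeps track of.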
*/ @@ -986,6 +954,10 @@ struct netns_ipvs { char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; /* net name space ptr */ struct net *net; /* Needed by timer routines */ + /* Number of heterogeneous destinations, needed becaus heterogeneous + * are not supported when synchronization is enabled. + */ + unsigned int mixed_address_family_dests; }; #define DEFAULT_SYNC_THRESHOLD 3 @@ -1139,9 +1111,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs) #endif -/* - * IPVS core functions - * (from ip_vs_core.c) +/* IPVS core functions + * (from ip_vs_core.c) */ const char *ip_vs_proto_name(unsigned int proto); void ip_vs_init_hash_table(struct list_head *table, int rows); @@ -1149,11 +1120,9 @@ void ip_vs_init_hash_table(struct list_head *table, int rows); #define IP_VS_APP_TYPE_FTP 1 -/* - * ip_vs_conn handling functions - * (from ip_vs_conn.c) +/* ip_vs_conn handling functions + * (from ip_vs_conn.c) */ - enum { IP_VS_DIR_INPUT = 0, IP_VS_DIR_OUTPUT, @@ -1210,7 +1179,7 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) void ip_vs_conn_put(struct ip_vs_conn *cp); void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); -struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, +struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark); @@ -1284,9 +1253,7 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) atomic_inc(&ctl_cp->n_control); } -/* - * IPVS netns init & cleanup functions - */ +/* IPVS netns init & cleanup functions */ int ip_vs_estimator_net_init(struct net *net); int ip_vs_control_net_init(struct net *net); int ip_vs_protocol_net_init(struct net *net); @@ -1301,9 +1268,8 @@ void ip_vs_estimator_net_cleanup(struct net *net); void ip_vs_sync_net_cleanup(struct net *net); void ip_vs_service_net_cleanup(struct net *net); -/* - * IPVS application functions - * (from ip_vs_app.c) +/* IPVS application functions + * (from ip_vs_app.c) */ #define IP_VS_APP_MAX_PORTS 8 struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); @@ -1323,9 +1289,7 @@ int unregister_ip_vs_pe(struct ip_vs_pe *pe); struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); -/* - * Use a #define to avoid all of module.h just for these trivial ops - */ +/* Use a #define to avoid all of module.h just for these trivial ops */ #define ip_vs_pe_get(pe) \ if (pe && pe->module) \ __module_get(pe->module); @@ -1334,9 +1298,7 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); if (pe && pe->module) \ module_put(pe->module); -/* - * IPVS protocol functions (from ip_vs_proto.c) - */ +/* IPVS protocol functions (from ip_vs_proto.c) */ int ip_vs_protocol_init(void); void ip_vs_protocol_cleanup(void); void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); @@ -1354,9 +1316,8 @@ extern struct ip_vs_protocol ip_vs_protocol_esp; extern struct ip_vs_protocol ip_vs_protocol_ah; extern struct ip_vs_protocol ip_vs_protocol_sctp; -/* - * Registering/unregistering scheduler functions - * (from ip_vs_sched.c) +/* Registering/unregistering scheduler functions + * (from ip_vs_sched.c) */ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); @@ -1375,10 +1336,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, void ip_vs_scheduler_err(struct ip_vs_service *svc, const char 
*msg); - -/* - * IPVS control data and functions (from ip_vs_ctl.c) - */ +/* IPVS control data and functions (from ip_vs_ctl.c) */ extern struct ip_vs_stats ip_vs_stats; extern int sysctl_ip_vs_sync_ver; @@ -1396,8 +1354,9 @@ void ip_vs_unregister_nl_ioctl(void); int ip_vs_control_init(void); void ip_vs_control_cleanup(void); struct ip_vs_dest * -ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, - __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, +ip_vs_find_dest(struct net *net, int svc_af, int dest_af, + const union nf_inet_addr *daddr, __be16 dport, + const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags); void ip_vs_try_bind_dest(struct ip_vs_conn *cp); @@ -1418,26 +1377,21 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) kfree(dest); } -/* - * IPVS sync daemon data and function prototypes - * (from ip_vs_sync.c) +/* IPVS sync daemon data and function prototypes + * (from ip_vs_sync.c) */ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); int stop_sync_thread(struct net *net, int state); void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); -/* - * IPVS rate estimator prototypes (from ip_vs_est.c) - */ +/* IPVS rate estimator prototypes (from ip_vs_est.c) */ void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); void ip_vs_read_estimator(struct ip_vs_stats_user *dst, struct ip_vs_stats *stats); -/* - * Various IPVS packet transmitters (from ip_vs_xmit.c) - */ +/* Various IPVS packet transmitters (from ip_vs_xmit.c) */ int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, @@ -1468,12 +1422,10 @@ int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, #endif #ifdef CONFIG_SYSCTL -/* - * This is a simple mechanism to ignore packets when - * we are loaded. Just set ip_vs_drop_rate to 'n' and - * we start to drop 1/rate of the packets +/* This is a simple mechanism to ignore packets when + * we are loaded. 
Just set ip_vs_drop_rate to 'n' and + * we start to drop 1/rate of the packets */ - static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { if (!ipvs->drop_rate) @@ -1487,9 +1439,7 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif -/* - * ip_vs_fwd_tag returns the forwarding tag of the connection - */ +/* ip_vs_fwd_tag returns the forwarding tag of the connection */ #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) @@ -1548,9 +1498,7 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) return csum_partial(diff, sizeof(diff), oldsum); } -/* - * Forget current conntrack (unconfirmed) and attach notrack entry - */ +/* Forget current conntrack (unconfirmed) and attach notrack entry */ static inline void ip_vs_notrack(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -1567,9 +1515,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb) } #ifdef CONFIG_IP_VS_NFCT -/* - * Netfilter connection tracking - * (from ip_vs_nfct.c) +/* Netfilter connection tracking + * (from ip_vs_nfct.c) */ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) { @@ -1608,14 +1555,12 @@ static inline int ip_vs_confirm_conntrack(struct sk_buff *skb) static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) { } -/* CONFIG_IP_VS_NFCT */ -#endif +#endif /* CONFIG_IP_VS_NFCT */ static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { - /* - * We think the overhead of processing active connections is 256 + /* We think the overhead of processing active connections is 256 * times higher than that of inactive connections in average. (This * 256 times might not be accurate, we will change it later) We * use the following formula to estimate the overhead now: diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a2db816e8461..97f472012438 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -121,6 +121,7 @@ struct frag_hdr { /* sysctls */ extern int sysctl_mld_max_msf; +extern int sysctl_mld_qrv; #define _DEVINC(net, statname, modifier, idev, field) \ ({ \ @@ -287,7 +288,8 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); -bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); +bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, + const struct inet6_skb_parm *opt); static inline bool ipv6_accept_ra(struct inet6_dev *idev) { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index dae2e24616e1..0ad1f47d2dc7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4,6 +4,7 @@ * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1226,7 +1227,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this - * particular key. + * particular key. 
Setting this flag does not necessarily mean that SKBs + * will have sufficient tailroom for ICV or MIC. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. @@ -1238,7 +1240,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver * if space should be prepared for the IV, but the IV * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does + * not necessarily mean that SKBs will have sufficient tailroom for ICV or + * MIC. * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received * management frames. The flag can help drivers that have a hardware * crypto implementation that doesn't deal with management frames @@ -1405,7 +1409,7 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities - * @wme: indicates whether the STA supports WME. Only valid during AP-mode. + * @wme: indicates whether the STA supports QoS/WME. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid @@ -1533,16 +1537,6 @@ struct ieee80211_tx_control { * @IEEE80211_HW_MFP_CAPABLE: * Hardware supports management frame protection (MFP, IEEE 802.11w). * - * @IEEE80211_HW_SUPPORTS_STATIC_SMPS: - * Hardware supports static spatial multiplexing powersave, - * ie. can turn off all but one chain even on HT connections - * that should be using more chains. - * - * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS: - * Hardware supports dynamic spatial multiplexing powersave, - * ie. can turn off all but one chain and then wake the rest - * up as required after, for example, rts/cts handshake. - * * @IEEE80211_HW_SUPPORTS_UAPSD: * Hardware supports Unscheduled Automatic Power Save Delivery * (U-APSD) in managed mode. The mode is configured with @@ -1606,6 +1600,9 @@ struct ieee80211_tx_control { * is not enabled the default action is to disconnect when getting the * CSA frame. * + * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload + * or tailroom of TX skbs without copying them first. + * * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands * in one command, mac80211 doesn't have to run separate scans per band. 
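One of the mac80211 additions above, IEEE80211_HW_SUPPORTS_CLONED_SKBS, is a promise the driver makes at registration time: it will never touch the payload or tailroom of TX skbs without copying them first, so mac80211 may skip its own defensive copies. A sketch of how a driver might advertise it; the function name is made up and hw is the handle obtained from ieee80211_alloc_hw() elsewhere:

#include <net/mac80211.h>

static void example_advertise_cloned_skbs(struct ieee80211_hw *hw)
{
	/* Declare that this driver only ever reads the frame contents, so
	 * cloned skbs do not have to be copied before encryption. */
	hw->flags |= IEEE80211_HW_SUPPORTS_CLONED_SKBS;
}

This would run before ieee80211_register_hw(), alongside the driver's other hw->flags setup.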
*/ @@ -1625,8 +1622,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, - IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, - IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, + /* free slots */ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, @@ -1639,7 +1635,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, - /* bit 29 unused */ + IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29, IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30, }; @@ -2666,7 +2662,9 @@ enum ieee80211_roc_type { * * @set_coverage_class: Set slot time for given coverage class as specified * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout - * accordingly. This callback is not required and may sleep. + * accordingly; coverage class equals to -1 to enable ACK timeout + * estimation algorithm (dynack). To disable dynack set valid value for + * coverage class. This callback is not required and may sleep. * * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may * be %NULL. The callback can sleep. @@ -2950,7 +2948,7 @@ struct ieee80211_ops { int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); - void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); + void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class); #ifdef CONFIG_NL80211_TESTMODE int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len); diff --git a/include/net/mld.h b/include/net/mld.h index faa1d161bf24..01d751303498 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -88,12 +88,15 @@ struct mld2_query { #define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) #define MLDV2_QQIC_MAN(value) ((value) & 0x0f) +#define MLD_EXP_MIN_LIMIT 32768UL +#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1) + static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) { /* RFC3810, 5.1.3. 
Maximum Response Code */ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); - if (mc_mrc < 32768) { + if (mc_mrc < MLD_EXP_MIN_LIMIT) { ret = mc_mrc; } else { unsigned long mc_man, mc_exp; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 47f425464f84..f60558d0254c 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -373,7 +373,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) return 0; } -#ifdef CONFIG_BRIDGE_NETFILTER +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq, hh_alen; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 361d26077196..e0d64667a4b3 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net) atomic_inc(&net->ipv4.rt_genid); } -#if IS_ENABLED(CONFIG_IPV6) -static inline int rt_genid_ipv6(struct net *net) -{ - return atomic_read(&net->ipv6.rt_genid); -} - -static inline void rt_genid_bump_ipv6(struct net *net) -{ - atomic_inc(&net->ipv6.rt_genid); -} -#else -static inline int rt_genid_ipv6(struct net *net) -{ - return 0; -} - +extern void (*__fib6_flush_trees)(struct net *net); static inline void rt_genid_bump_ipv6(struct net *net) { + if (__fib6_flush_trees) + __fib6_flush_trees(net); } -#endif #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) static inline struct netns_ieee802154_lowpan * diff --git a/include/net/netdma.h b/include/net/netdma.h deleted file mode 100644 index 8ba8ce284eeb..000000000000 --- a/include/net/netdma.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. 
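The mldv2_mrc() hunk above stops just before the exponent branch. For values at or above MLD_EXP_MIN_LIMIT, RFC 3810 (5.1.3) encodes the Maximum Response Delay as a mantissa/exponent float; the sketch below is my own illustration of that decode, not the kernel's exact code, and the MLDV2_MRC_* helpers are assumed by analogy with the QQIC macros shown above.

#define MLDV2_MRC_EXP(value)    (((value) >> 12) & 0x0007)  /* assumed helper */
#define MLDV2_MRC_MAN(value)    ((value) & 0x0fff)          /* assumed helper */

/* Decode an MLDv2 Maximum Response Code into a delay value. */
static unsigned long example_mldv2_mrc_decode(unsigned long mc_mrc)
{
        if (mc_mrc < MLD_EXP_MIN_LIMIT)
                return mc_mrc;                  /* small values are literal */

        /* large values: |1|exp(3)|mant(12)| => (mant | 0x1000) << (exp + 3) */
        return (MLDV2_MRC_MAN(mc_mrc) | 0x1000) <<
               (MLDV2_MRC_EXP(mc_mrc) + 3);
}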
- */ -#ifndef NETDMA_H -#define NETDMA_H -#ifdef CONFIG_NET_DMA -#include <linux/dmaengine.h> -#include <linux/skbuff.h> - -int dma_skb_copy_datagram_iovec(struct dma_chan* chan, - struct sk_buff *skb, int offset, struct iovec *to, - size_t len, struct dma_pinned_list *pinned_list); - -#endif /* CONFIG_NET_DMA */ -#endif /* NETDMA_H */ diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h new file mode 100644 index 000000000000..2aa6048a55c1 --- /dev/null +++ b/include/net/netfilter/br_netfilter.h @@ -0,0 +1,6 @@ +#ifndef _BR_NETFILTER_H_ +#define _BR_NETFILTER_H_ + +void br_netfilter_enable(void); + +#endif /* _BR_NETFILTER_H_ */ diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h new file mode 100644 index 000000000000..a9c001c646da --- /dev/null +++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h @@ -0,0 +1,14 @@ +#ifndef _NF_NAT_MASQUERADE_IPV4_H_ +#define _NF_NAT_MASQUERADE_IPV4_H_ + +#include <net/netfilter/nf_nat.h> + +unsigned int +nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, + const struct nf_nat_range *range, + const struct net_device *out); + +void nf_nat_masquerade_ipv4_register_notifier(void); +void nf_nat_masquerade_ipv4_unregister_notifier(void); + +#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 931fbf812171..e8427193c777 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -1,128 +1,13 @@ #ifndef _IPV4_NF_REJECT_H #define _IPV4_NF_REJECT_H -#include <net/ip.h> -#include <net/tcp.h> -#include <net/route.h> -#include <net/dst.h> +#include <net/icmp.h> static inline void nf_send_unreach(struct sk_buff *skb_in, int code) { icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); } -/* Send RST reply */ -static void nf_send_reset(struct sk_buff *oldskb, int hook) -{ - struct sk_buff *nskb; - const struct iphdr *oiph; - struct iphdr *niph; - const struct tcphdr *oth; - struct tcphdr _otcph, *tcph; - - /* IP header checks: fragment. */ - if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) - return; - - oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), - sizeof(_otcph), &_otcph); - if (oth == NULL) - return; - - /* No RST for RST. 
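The new nf_nat_masquerade_ipv4() helper above factors the masquerade logic out so that both the xtables target and an nft expression can share it. A rough sketch of the expected calling pattern (hypothetical module; names are illustrative, signatures are the ones declared above):

#include <linux/module.h>
#include <net/netfilter/ipv4/nf_nat_masquerade.h>

/* Illustrative POST_ROUTING action delegating to the shared helper. */
static unsigned int example_masq_target(struct sk_buff *skb,
                                        unsigned int hooknum,
                                        const struct nf_nat_range *range,
                                        const struct net_device *out)
{
        return nf_nat_masquerade_ipv4(skb, hooknum, range, out);
}

static int __init example_masq_init(void)
{
        /* the notifier cleans up masqueraded conntracks when an interface goes away */
        nf_nat_masquerade_ipv4_register_notifier();
        return 0;
}

static void __exit example_masq_exit(void)
{
        nf_nat_masquerade_ipv4_unregister_notifier();
}

module_init(example_masq_init);
module_exit(example_masq_exit);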
*/ - if (oth->rst) - return; - - if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) - return; - - /* Check checksum */ - if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) - return; - oiph = ip_hdr(oldskb); - - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + - LL_MAX_HEADER, GFP_ATOMIC); - if (!nskb) - return; - - skb_reserve(nskb, LL_MAX_HEADER); - - skb_reset_network_header(nskb); - niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); - niph->version = 4; - niph->ihl = sizeof(struct iphdr) / 4; - niph->tos = 0; - niph->id = 0; - niph->frag_off = htons(IP_DF); - niph->protocol = IPPROTO_TCP; - niph->check = 0; - niph->saddr = oiph->daddr; - niph->daddr = oiph->saddr; - - skb_reset_transport_header(nskb); - tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); - memset(tcph, 0, sizeof(*tcph)); - tcph->source = oth->dest; - tcph->dest = oth->source; - tcph->doff = sizeof(struct tcphdr) / 4; - - if (oth->ack) - tcph->seq = oth->ack_seq; - else { - tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + - oldskb->len - ip_hdrlen(oldskb) - - (oth->doff << 2)); - tcph->ack = 1; - } - - tcph->rst = 1; - tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, - niph->daddr, 0); - nskb->ip_summed = CHECKSUM_PARTIAL; - nskb->csum_start = (unsigned char *)tcph - nskb->head; - nskb->csum_offset = offsetof(struct tcphdr, check); - - /* ip_route_me_harder expects skb->dst to be set */ - skb_dst_set_noref(nskb, skb_dst(oldskb)); - - nskb->protocol = htons(ETH_P_IP); - if (ip_route_me_harder(nskb, RTN_UNSPEC)) - goto free_nskb; - - niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); - - /* "Never happens" */ - if (nskb->len > dst_mtu(skb_dst(nskb))) - goto free_nskb; - - nf_ct_attach(nskb, oldskb); - -#ifdef CONFIG_BRIDGE_NETFILTER - /* If we use ip_local_out for bridged traffic, the MAC source on - * the RST will be ours, instead of the destination's. This confuses - * some routers/firewalls, and they drop the packet. So we need to - * build the eth header using the original destination's MAC as the - * source, and send the RST packet directly. 
- */ - if (oldskb->nf_bridge) { - struct ethhdr *oeth = eth_hdr(oldskb); - nskb->dev = oldskb->nf_bridge->physindev; - niph->tot_len = htons(nskb->len); - ip_send_check(niph); - if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), - oeth->h_source, oeth->h_dest, nskb->len) < 0) - goto free_nskb; - dev_queue_xmit(nskb); - } else -#endif - ip_local_out(nskb); - - return; - - free_nskb: - kfree_skb(nskb); -} - +void nf_send_reset(struct sk_buff *oldskb, int hook); #endif /* _IPV4_NF_REJECT_H */ diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h new file mode 100644 index 000000000000..0a13396cd390 --- /dev/null +++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h @@ -0,0 +1,10 @@ +#ifndef _NF_NAT_MASQUERADE_IPV6_H_ +#define _NF_NAT_MASQUERADE_IPV6_H_ + +unsigned int +nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + const struct net_device *out); +void nf_nat_masquerade_ipv6_register_notifier(void); +void nf_nat_masquerade_ipv6_unregister_notifier(void); + +#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 710d17ed70b4..48e18810a9be 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h @@ -1,11 +1,7 @@ #ifndef _IPV6_NF_REJECT_H #define _IPV6_NF_REJECT_H -#include <net/ipv6.h> -#include <net/ip6_route.h> -#include <net/ip6_fib.h> -#include <net/ip6_checksum.h> -#include <linux/netfilter_ipv6.h> +#include <linux/icmpv6.h> static inline void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, @@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); } -/* Send RST reply */ -static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) -{ - struct sk_buff *nskb; - struct tcphdr otcph, *tcph; - unsigned int otcplen, hh_len; - int tcphoff, needs_ack; - const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); - struct ipv6hdr *ip6h; -#define DEFAULT_TOS_VALUE 0x0U - const __u8 tclass = DEFAULT_TOS_VALUE; - struct dst_entry *dst = NULL; - u8 proto; - __be16 frag_off; - struct flowi6 fl6; - - if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || - (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { - pr_debug("addr is not unicast.\n"); - return; - } - - proto = oip6h->nexthdr; - tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); - - if ((tcphoff < 0) || (tcphoff > oldskb->len)) { - pr_debug("Cannot get TCP header.\n"); - return; - } - - otcplen = oldskb->len - tcphoff; - - /* IP header checks: fragment, too short. */ - if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { - pr_debug("proto(%d) != IPPROTO_TCP, " - "or too short. otcplen = %d\n", - proto, otcplen); - return; - } - - if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) - BUG(); - - /* No RST for RST. */ - if (otcph.rst) { - pr_debug("RST is set\n"); - return; - } - - /* Check checksum. 
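With the RST builder moved out of line, ipv4/nf_reject.h now only carries declarations; a reject-style caller is expected to use the helpers roughly as below (sketch with simplified verdict handling; the function name is hypothetical):

#include <linux/netfilter.h>
#include <net/netfilter/ipv4/nf_reject.h>

/* Illustrative reject action: ICMP unreachable or TCP RST, then drop. */
static unsigned int example_reject(struct sk_buff *skb, int hook, bool tcp_reset)
{
        if (tcp_reset)
                nf_send_reset(skb, hook);       /* out of line since this change */
        else
                nf_send_unreach(skb, ICMP_NET_UNREACH);

        return NF_DROP;
}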
*/ - if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { - pr_debug("TCP checksum is invalid\n"); - return; - } - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_TCP; - fl6.saddr = oip6h->daddr; - fl6.daddr = oip6h->saddr; - fl6.fl6_sport = otcph.dest; - fl6.fl6_dport = otcph.source; - security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); - dst = ip6_route_output(net, NULL, &fl6); - if (dst == NULL || dst->error) { - dst_release(dst); - return; - } - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); - if (IS_ERR(dst)) - return; - - hh_len = (dst->dev->hard_header_len + 15)&~15; - nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) - + sizeof(struct tcphdr) + dst->trailer_len, - GFP_ATOMIC); - - if (!nskb) { - net_dbg_ratelimited("cannot alloc skb\n"); - dst_release(dst); - return; - } - - skb_dst_set(nskb, dst); - - skb_reserve(nskb, hh_len + dst->header_len); - - skb_put(nskb, sizeof(struct ipv6hdr)); - skb_reset_network_header(nskb); - ip6h = ipv6_hdr(nskb); - ip6_flow_hdr(ip6h, tclass, 0); - ip6h->hop_limit = ip6_dst_hoplimit(dst); - ip6h->nexthdr = IPPROTO_TCP; - ip6h->saddr = oip6h->daddr; - ip6h->daddr = oip6h->saddr; - - skb_reset_transport_header(nskb); - tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); - /* Truncate to length (no data) */ - tcph->doff = sizeof(struct tcphdr)/4; - tcph->source = otcph.dest; - tcph->dest = otcph.source; - - if (otcph.ack) { - needs_ack = 0; - tcph->seq = otcph.ack_seq; - tcph->ack_seq = 0; - } else { - needs_ack = 1; - tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin - + otcplen - (otcph.doff<<2)); - tcph->seq = 0; - } - - /* Reset flags */ - ((u_int8_t *)tcph)[13] = 0; - tcph->rst = 1; - tcph->ack = needs_ack; - tcph->window = 0; - tcph->urg_ptr = 0; - tcph->check = 0; - - /* Adjust TCP checksum */ - tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, - &ipv6_hdr(nskb)->daddr, - sizeof(struct tcphdr), IPPROTO_TCP, - csum_partial(tcph, - sizeof(struct tcphdr), 0)); - - nf_ct_attach(nskb, oldskb); - -#ifdef CONFIG_BRIDGE_NETFILTER - /* If we use ip6_local_out for bridged traffic, the MAC source on - * the RST will be ours, instead of the destination's. This confuses - * some routers/firewalls, and they drop the packet. So we need to - * build the eth header using the original destination's MAC as the - * source, and send the RST packet directly. 
- */ - if (oldskb->nf_bridge) { - struct ethhdr *oeth = eth_hdr(oldskb); - nskb->dev = oldskb->nf_bridge->physindev; - nskb->protocol = htons(ETH_P_IPV6); - ip6h->payload_len = htons(sizeof(struct tcphdr)); - if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), - oeth->h_source, oeth->h_dest, nskb->len) < 0) - return; - dev_queue_xmit(nskb); - } else -#endif - ip6_local_out(nskb); -} +void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); #endif /* _IPV6_NF_REJECT_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index a71dd333ac68..344b1ab19220 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -32,10 +32,8 @@ struct nf_conn_nat { struct hlist_node bysource; struct nf_conn *ct; union nf_conntrack_nat_help help; -#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \ - defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \ - defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE) +#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \ + IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6) int masq_index; #endif }; @@ -68,8 +66,8 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum, struct nf_conn_nat *nat, const struct net_device *out) { -#if IS_ENABLED(CONFIG_IP_NF_TARGET_MASQUERADE) || \ - IS_ENABLED(CONFIG_IP6_NF_TARGET_MASQUERADE) +#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \ + IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6) return nat->masq_index && hooknum == NF_INET_POST_ROUTING && CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL && nat->masq_index != out->ifindex; diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index 5a2919b2e09a..340c013795a4 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto); int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum); + +unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, unsigned int hdrlen); +unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct 
net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + #endif /* _NF_NAT_L3PROTO_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c4d86198d3d6..3d7292392fac 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -241,6 +241,7 @@ void nft_unregister_set(struct nft_set_ops *ops); * @dtype: data type (verdict or numeric type defined by userspace) * @size: maximum set size * @nelems: number of elements + * @policy: set parameterization (see enum nft_set_policies) * @ops: set ops * @flags: set flags * @klen: key length @@ -255,6 +256,7 @@ struct nft_set { u32 dtype; u32 size; u32 nelems; + u16 policy; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; u16 flags; diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h new file mode 100644 index 000000000000..c72729f954f4 --- /dev/null +++ b/include/net/netfilter/nft_masq.h @@ -0,0 +1,16 @@ +#ifndef _NFT_MASQ_H_ +#define _NFT_MASQ_H_ + +struct nft_masq { + u32 flags; +}; + +extern const struct nla_policy nft_masq_policy[]; + +int nft_masq_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr); + +#endif /* _NFT_MASQ_H_ */ diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h index 36b0da2d55bb..60fa1530006b 100644 --- a/include/net/netfilter/nft_reject.h +++ b/include/net/netfilter/nft_reject.h @@ -14,12 +14,7 @@ int nft_reject_init(const struct nft_ctx *ctx, int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr); -void nft_reject_ipv4_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt); - -void nft_reject_ipv6_eval(const struct nft_expr *expr, - struct nft_data data[NFT_REG_MAX + 1], - const struct nft_pktinfo *pkt); +int nft_reject_icmp_code(u8 code); +int nft_reject_icmpv6_code(u8 code); #endif diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h index e2070960bac0..8170f8d7052b 100644 --- a/include/net/netns/ieee802154_6lowpan.h +++ b/include/net/netns/ieee802154_6lowpan.h @@ -16,7 +16,6 @@ struct netns_sysctl_lowpan { struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; - int max_dsize; }; #endif diff --git 
a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index aec5e12f9f19..24945cefc4fd 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -76,6 +76,7 @@ struct netns_ipv4 { int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_nonlocal_bind; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index eade27adecf3..69ae41f2098c 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -76,7 +76,7 @@ struct netns_ipv6 { #endif #endif atomic_t dev_addr_genid; - atomic_t rt_genid; + atomic_t fib6_sernum; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 3492434baf88..9da798256f0e 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -13,6 +13,19 @@ struct ctl_table_header; struct xfrm_policy_hash { struct hlist_head *table; unsigned int hmask; + u8 dbits4; + u8 sbits4; + u8 dbits6; + u8 sbits6; +}; + +struct xfrm_policy_hthresh { + struct work_struct work; + seqlock_t lock; + u8 lbits4; + u8 rbits4; + u8 lbits6; + u8 rbits6; }; struct netns_xfrm { @@ -41,6 +54,7 @@ struct netns_xfrm { struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; + struct xfrm_policy_hthresh policy_hthresh; struct sock *nlsk; diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index fbfa4e471abb..9eca9ae2280c 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -2,6 +2,7 @@ * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * + * Copyright (C) 2014 Marvell International Ltd. * Copyright (C) 2011 Texas Instruments, Inc. 
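Stepping back to the nf_nat_ipv4_*()/nf_nat_ipv6_*() hooks declared in nf_nat_l3proto.h above: the core now owns the generic NAT bookkeeping and calls back into the actual ruleset through do_chain. A hedged sketch of the glue a NAT table is expected to provide (hypothetical names; the hook prototype matches this kernel generation's ops-based nf_hookfn):

/* Run the NAT ruleset for this packet and return an NF_* verdict. */
static unsigned int example_nat_do_chain(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
                                         struct nf_conn *ct)
{
        return NF_ACCEPT;       /* ruleset evaluation elided */
}

static unsigned int example_nat_hook(const struct nf_hook_ops *ops,
                                     struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
{
        return nf_nat_ipv4_fn(ops, skb, in, out, example_nat_do_chain);
}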
* * Written by Ilan Elias <ilane@ti.com> @@ -65,19 +66,18 @@ #define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 #define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 #define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 -#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 +#define NCI_NFC_V_PASSIVE_POLL_MODE 0x06 #define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 #define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 #define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 -#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86 /* NCI RF Technologies */ #define NCI_NFC_RF_TECHNOLOGY_A 0x00 #define NCI_NFC_RF_TECHNOLOGY_B 0x01 #define NCI_NFC_RF_TECHNOLOGY_F 0x02 -#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 +#define NCI_NFC_RF_TECHNOLOGY_V 0x03 /* NCI Bit Rates */ #define NCI_NFC_BIT_RATE_106 0x00 @@ -87,6 +87,7 @@ #define NCI_NFC_BIT_RATE_1695 0x04 #define NCI_NFC_BIT_RATE_3390 0x05 #define NCI_NFC_BIT_RATE_6780 0x06 +#define NCI_NFC_BIT_RATE_26 0x20 /* NCI RF Protocols */ #define NCI_RF_PROTOCOL_UNKNOWN 0x00 @@ -95,6 +96,7 @@ #define NCI_RF_PROTOCOL_T3T 0x03 #define NCI_RF_PROTOCOL_ISO_DEP 0x04 #define NCI_RF_PROTOCOL_NFC_DEP 0x05 +#define NCI_RF_PROTOCOL_T5T 0x06 /* NCI RF Interfaces */ #define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 @@ -328,6 +330,12 @@ struct rf_tech_specific_params_nfcf_poll { __u8 sensf_res[18]; /* 16 or 18 Bytes */ } __packed; +struct rf_tech_specific_params_nfcv_poll { + __u8 res_flags; + __u8 dsfid; + __u8 uid[8]; /* 8 Bytes */ +} __packed; + struct nci_rf_discover_ntf { __u8 rf_discovery_id; __u8 rf_protocol; @@ -338,6 +346,7 @@ struct nci_rf_discover_ntf { struct rf_tech_specific_params_nfca_poll nfca_poll; struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; + struct rf_tech_specific_params_nfcv_poll nfcv_poll; } rf_tech_specific_params; __u8 ntf_type; @@ -372,6 +381,7 @@ struct nci_rf_intf_activated_ntf { struct rf_tech_specific_params_nfca_poll nfca_poll; struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; + struct rf_tech_specific_params_nfcv_poll nfcv_poll; } rf_tech_specific_params; __u8 data_exch_rf_tech_and_mode; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 1f9a0f5272fe..75d10e625c49 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -64,10 +64,11 @@ enum nci_state { struct nci_dev; struct nci_ops { - int (*open)(struct nci_dev *ndev); - int (*close)(struct nci_dev *ndev); - int (*send)(struct nci_dev *ndev, struct sk_buff *skb); - int (*setup)(struct nci_dev *ndev); + int (*open)(struct nci_dev *ndev); + int (*close)(struct nci_dev *ndev); + int (*send)(struct nci_dev *ndev, struct sk_buff *skb); + int (*setup)(struct nci_dev *ndev); + __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6da46dcf1049..bc49967e1a68 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -20,11 +20,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) { - unsigned long old_cl; - - old_cl = *clp; - *clp = cl; - return old_cl; + return xchg(clp, cl); } static inline unsigned long @@ -137,7 +133,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr); -void 
tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts); +void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); @@ -170,6 +166,7 @@ struct tcf_ematch { unsigned int datalen; u16 matchid; u16 flags; + struct net *net; }; static inline int tcf_em_is_container(struct tcf_ematch *em) @@ -233,12 +230,11 @@ struct tcf_ematch_tree { struct tcf_ematch_ops { int kind; int datalen; - int (*change)(struct tcf_proto *, void *, + int (*change)(struct net *net, void *, int, struct tcf_ematch *); int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *); - void (*destroy)(struct tcf_proto *, - struct tcf_ematch *); + void (*destroy)(struct tcf_ematch *); int (*dump)(struct sk_buff *, struct tcf_ematch *); struct module *owner; struct list_head link; @@ -248,7 +244,7 @@ int tcf_em_register(struct tcf_ematch_ops *); void tcf_em_unregister(struct tcf_ematch_ops *); int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *, struct tcf_ematch_tree *); -void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *); +void tcf_em_tree_destroy(struct tcf_ematch_tree *); int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, struct tcf_pkt_info *); @@ -305,7 +301,7 @@ struct tcf_ematch_tree { }; #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0) -#define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0) +#define tcf_em_tree_destroy(t) do { (void)(t); } while(0) #define tcf_em_tree_dump(skb, t, tlv) (0) #define tcf_em_tree_change(tp, dst, src) do { } while(0) #define tcf_em_tree_match(skb, t, info) ((void)(info), 1) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index ec030cd76616..27a33833ff4a 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -50,7 +50,7 @@ typedef long psched_tdiff_t; static inline psched_time_t psched_get_time(void) { - return PSCHED_NS2TICKS(ktime_to_ns(ktime_get())); + return PSCHED_NS2TICKS(ktime_get_ns()); } static inline psched_tdiff_t @@ -65,12 +65,12 @@ struct qdisc_watchdog { }; void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); -void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); +void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle); static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) { - qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires)); + qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true); } void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); @@ -99,7 +99,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab); void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, - spinlock_t *root_lock); + spinlock_t *root_lock, bool validate); void __qdisc_run(struct Qdisc *q); diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 259992444e80..dad7ab20a8cb 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -167,7 +167,7 @@ struct ieee80211_reg_rule { struct ieee80211_regdomain { struct rcu_head rcu_head; u32 n_reg_rules; - char alpha2[2]; + char alpha2[3]; enum nl80211_dfs_regions dfs_region; struct ieee80211_reg_rule reg_rules[]; }; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 
a3cfb8ebeb53..d17ed6fb2f70 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -6,6 +6,8 @@ #include <linux/rcupdate.h> #include <linux/pkt_sched.h> #include <linux/pkt_cls.h> +#include <linux/percpu.h> +#include <linux/dynamic_queue_limits.h> #include <net/gen_stats.h> #include <net/rtnetlink.h> @@ -58,6 +60,7 @@ struct Qdisc { * multiqueue device. */ #define TCQ_F_WARN_NONWC (1 << 16) +#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; @@ -83,9 +86,15 @@ struct Qdisc { */ unsigned long state; struct sk_buff_head q; - struct gnet_stats_basic_packed bstats; + union { + struct gnet_stats_basic_packed bstats; + struct gnet_stats_basic_cpu __percpu *cpu_bstats; + } __packed; unsigned int __state; - struct gnet_stats_queue qstats; + union { + struct gnet_stats_queue qstats; + struct gnet_stats_queue __percpu *cpu_qstats; + } __packed; struct rcu_head rcu_head; int padded; atomic_t refcnt; @@ -111,6 +120,21 @@ static inline void qdisc_run_end(struct Qdisc *qdisc) qdisc->__state &= ~__QDISC___STATE_RUNNING; } +static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) +{ + return qdisc->flags & TCQ_F_ONETXQUEUE; +} + +static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq) +{ +#ifdef CONFIG_BQL + /* Non-BQL migrated drivers will return 0, too. */ + return dql_avail(&txq->dql); +#else + return 0; +#endif +} + static inline bool qdisc_is_throttled(const struct Qdisc *qdisc) { return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false; @@ -143,7 +167,7 @@ struct Qdisc_class_ops { void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); + struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); @@ -212,8 +236,8 @@ struct tcf_proto_ops { struct tcf_proto { /* Fast access part */ - struct tcf_proto *next; - void *root; + struct tcf_proto __rcu *next; + void __rcu *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); @@ -225,13 +249,15 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; + struct rcu_head rcu; }; struct qdisc_skb_cb { unsigned int pkt_len; u16 slave_dev_queue_mapping; u16 _pad; - unsigned char data[24]; +#define QDISC_CB_PRIV_LEN 20 + unsigned char data[QDISC_CB_PRIV_LEN]; }; static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) @@ -259,7 +285,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc) static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) { - return qdisc->dev_queue->qdisc; + struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); + + return q; } static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) @@ -376,7 +404,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); void tcf_destroy(struct tcf_proto *tp); -void tcf_destroy_chain(struct tcf_proto **fl); +void tcf_destroy_chain(struct tcf_proto __rcu **fl); /* Reset all TX qdiscs greater then index of a device. 
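The qdisc_may_bulk()/qdisc_avail_bulklimit() pair above lets the dequeue path ask how much the driver queue can still absorb before bulking more packets into one transmit attempt. A hedged sketch of a caller (the budget of 8 is purely illustrative):

/* Illustrative only: pick a bulk-dequeue budget for one xmit attempt. */
static int example_bulk_budget(const struct Qdisc *q)
{
        const struct netdev_queue *txq = q->dev_queue;

        if (!qdisc_may_bulk(q))
                return 1;       /* only single-txq qdiscs are bulked */

        /* With BQL, stop bulking once the driver queue reports no headroom. */
        return qdisc_avail_bulklimit(txq) > 0 ? 8 : 1;
}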
*/ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) @@ -384,7 +412,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) struct Qdisc *qdisc; for (; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); qdisc_reset(qdisc); @@ -402,13 +430,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev) static inline bool qdisc_all_tx_empty(const struct net_device *dev) { unsigned int i; + + rcu_read_lock(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - const struct Qdisc *q = txq->qdisc; + const struct Qdisc *q = rcu_dereference(txq->qdisc); - if (q->q.qlen) + if (q->q.qlen) { + rcu_read_unlock(); return false; + } } + rcu_read_unlock(); return true; } @@ -416,9 +449,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev) static inline bool qdisc_tx_changing(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != txq->qdisc_sleeping) + if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) return true; } return false; @@ -428,9 +462,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev) static inline bool qdisc_tx_is_noop(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != &noop_qdisc) + if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) return false; } return true; @@ -476,6 +511,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; } +static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) +{ + return q->flags & TCQ_F_CPUSTATS; +} static inline void bstats_update(struct gnet_stats_basic_packed *bstats, const struct sk_buff *skb) @@ -484,17 +523,62 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats, bstats->packets += skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; } +static inline void qdisc_bstats_update_cpu(struct Qdisc *sch, + const struct sk_buff *skb) +{ + struct gnet_stats_basic_cpu *bstats = + this_cpu_ptr(sch->cpu_bstats); + + u64_stats_update_begin(&bstats->syncp); + bstats_update(&bstats->bstats, skb); + u64_stats_update_end(&bstats->syncp); +} + static inline void qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb) { bstats_update(&sch->bstats, skb); } +static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog -= qdisc_pkt_len(skb); +} + +static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog += qdisc_pkt_len(skb); +} + +static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) +{ + sch->qstats.drops += count; +} + +static inline void qdisc_qstats_drop(struct Qdisc *sch) +{ + sch->qstats.drops++; +} + +static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch) +{ + struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats); + + qstats->drops++; +} + +static inline void qdisc_qstats_overlimit(struct Qdisc *sch) +{ + sch->qstats.overlimits++; +} + static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff_head *list) { __skb_queue_tail(list, skb); - sch->qstats.backlog += qdisc_pkt_len(skb); + qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } @@ -510,7 +594,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, struct sk_buff *skb = __skb_dequeue(list); if (likely(skb != NULL)) { - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); } @@ -529,7 +613,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, if (likely(skb != NULL)) { unsigned int len = qdisc_pkt_len(skb); - sch->qstats.backlog -= len; + qdisc_qstats_backlog_dec(sch, skb); kfree_skb(skb); return len; } @@ -548,7 +632,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch, struct sk_buff *skb = __skb_dequeue_tail(list); if (likely(skb != NULL)) - sch->qstats.backlog -= qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); return skb; } @@ -630,14 +714,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch) static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) { kfree_skb(skb); - sch->qstats.drops++; + qdisc_qstats_drop(sch); return NET_XMIT_DROP; } static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) { - sch->qstats.drops++; + qdisc_qstats_drop(sch); #ifdef CONFIG_NET_CLS_ACT if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index f22538e68245..d4a20d00461c 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -115,7 +115,7 @@ typedef enum { * analysis of the state functions, but in reality just taken from * thin air in the hopes othat we don't trigger a kernel panic. */ -#define SCTP_MAX_NUM_COMMANDS 14 +#define SCTP_MAX_NUM_COMMANDS 20 typedef union { void *zero_all; /* Set to NULL to clear the entire union */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index f6e7397e799d..9fbd856e6713 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) return asoc ? 
asoc->assoc_id : 0; } +static inline enum sctp_sstat_state +sctp_assoc_to_state(const struct sctp_association *asoc) +{ + /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we + * got rid of it in kernel space. Therefore SCTP_CLOSED et al + * start at =1 in user space, but actually as =0 in kernel space. + * Now that we can not break user space and SCTP_EMPTY is exposed + * there, we need to fix it up with an ugly offset not to break + * applications. :( + */ + return asoc->state + 1; +} + /* Look up the association by its id. */ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); diff --git a/include/net/snmp.h b/include/net/snmp.h index f1f27fdbb0d5..8fd2f498782e 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -146,19 +146,15 @@ struct linux_xfrm_mib { #define SNMP_ADD_STATS(mib, field, addend) \ this_cpu_add(mib->mibs[field], addend) -/* - * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr" - * to make @ptr a non-percpu pointer. - */ #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ this_cpu_inc(ptr[basefield##PKTS]); \ this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ __this_cpu_inc(ptr[basefield##PKTS]); \ __this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) diff --git a/include/net/sock.h b/include/net/sock.h index 7f2ab72f321a..7db3db112baa 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -233,7 +233,6 @@ struct cg_proto; * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_write_queue: Packet sending queue - * @sk_async_wait_queue: DMA copied packets * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward @@ -362,10 +361,6 @@ struct sock { struct sk_filter __rcu *sk_filter; struct socket_wq __rcu *sk_wq; -#ifdef CONFIG_NET_DMA - struct sk_buff_head sk_async_wait_queue; -#endif - #ifdef CONFIG_XFRM struct xfrm_policy *sk_policy[2]; #endif @@ -1574,7 +1569,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, void sock_wfree(struct sk_buff *skb); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); +void sock_efree(struct sk_buff *skb); +#ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); +#else +#define sock_edemux(skb) sock_efree(skb) +#endif int sock_setsockopt(struct socket *sock, int level, int op, char __user *optval, unsigned int optlen); @@ -2041,6 +2041,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); +struct sk_buff *sock_dequeue_err_skb(struct sock *sk); /* * Recover an error report and clear atomically @@ -2165,9 +2166,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) */ if (sock_flag(sk, SOCK_RCVTSTAMP) || (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || - (kt.tv64 && - (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE || - skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) || + (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || (hwtstamps->hwtstamp.tv64 && (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) __sock_recv_timestamp(msg, sk, skb); @@ -2195,6 +2194,8 @@ static inline 
void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, sk->sk_stamp = skb->tstamp; } +void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); + /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet @@ -2202,33 +2203,27 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, * * Note : callers should take care of initial *tx_flags value (usually 0) */ -void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); +static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +{ + if (unlikely(sk->sk_tsflags)) + __sock_tx_timestamp(sk, tx_flags); + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) + *tx_flags |= SKBTX_WIFI_STATUS; +} /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from * @skb: socket buffer to eat - * @copied_early: flag indicating whether DMA operations copied this data early * * This routine must be called with interrupts disabled or with the socket * locked so that the sk_buff queue operation is ok. */ -#ifdef CONFIG_NET_DMA -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) -{ - __skb_unlink(skb, &sk->sk_receive_queue); - if (!copied_early) - __kfree_skb(skb); - else - __skb_queue_tail(&sk->sk_async_wait_queue, skb); -} -#else -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } -#endif static inline struct net *sock_net(const struct sock *sk) diff --git a/include/net/tcp.h b/include/net/tcp.h index 590e01a476ac..74efeda994b3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -27,7 +27,6 @@ #include <linux/cache.h> #include <linux/percpu.h> #include <linux/skbuff.h> -#include <linux/dmaengine.h> #include <linux/crypto.h> #include <linux/cryptohash.h> #include <linux/kref.h> @@ -262,7 +261,6 @@ extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; extern int sysctl_tcp_low_latency; -extern int sysctl_tcp_dma_copybreak; extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; @@ -368,7 +366,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len); void tcp_rcv_space_adjust(struct sock *sk); -void tcp_cleanup_rbuf(struct sock *sk, int copied); int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); void tcp_twsk_destructor(struct sock *sk); ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, @@ -672,6 +669,12 @@ void tcp_send_window_probe(struct sock *sk); */ #define tcp_time_stamp ((__u32)(jiffies)) +static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) +{ + return skb->skb_mstamp.stamp_jiffies; +} + + #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) #define TCPHDR_FIN 0x01 @@ -690,15 +693,18 @@ void tcp_send_window_probe(struct sock *sk); * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately. 
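sock_tx_timestamp() above is now an inline fast path that only drops into __sock_tx_timestamp() when the socket actually has tsflags set; callers keep the usual pattern. A minimal sketch of a transmit-path call site (hypothetical helper):

/* Illustrative: let the socket's timestamping settings (and SOCK_WIFI_STATUS)
 * populate the skb's tx_flags before it is handed to the device.
 */
static void example_fill_tx_flags(struct sock *sk, struct sk_buff *skb)
{
        __u8 tx_flags = 0;

        sock_tx_timestamp(sk, &tx_flags);
        skb_shinfo(skb)->tx_flags = tx_flags;
}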
*/ struct tcp_skb_cb { - union { - struct inet_skb_parm h4; -#if IS_ENABLED(CONFIG_IPV6) - struct inet6_skb_parm h6; -#endif - } header; /* For incoming frames */ __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ - __u32 when; /* used to compute rtt's */ + union { + /* Note : tcp_tw_isn is used in input path only + * (isn chosen by tcp_timewait_state_process()) + * + * tcp_gso_segs is used in write queue only, + * cf tcp_skb_pcount() + */ + __u32 tcp_tw_isn; + __u32 tcp_gso_segs; + }; __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK/FACK. */ @@ -714,33 +720,32 @@ struct tcp_skb_cb { __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ /* 1 byte hole */ __u32 ack_seq; /* Sequence number ACK'd */ + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; /* For incoming frames */ }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) -/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set - * - * If we receive a SYN packet with these bits set, it means a network is - * playing bad games with TOS bits. In order to avoid possible false congestion - * notifications, we disable TCP ECN negociation. +/* Due to TSO, an SKB can be composed of multiple actual + * packets. To keep these tracked properly, we use this. */ -static inline void -TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb, - struct net *net) +static inline int tcp_skb_pcount(const struct sk_buff *skb) { - const struct tcphdr *th = tcp_hdr(skb); + return TCP_SKB_CB(skb)->tcp_gso_segs; +} - if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr && - INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) - inet_rsk(req)->ecn_ok = 1; +static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) +{ + TCP_SKB_CB(skb)->tcp_gso_segs = segs; } -/* Due to TSO, an SKB can be composed of multiple actual - * packets. To keep these tracked properly, we use this. - */ -static inline int tcp_skb_pcount(const struct sk_buff *skb) +static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) { - return skb_shinfo(skb)->gso_segs; + TCP_SKB_CB(skb)->tcp_gso_segs += segs; } /* This is valid iff tcp_skb_pcount() > 1. 
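With the GSO segment count now cached in tcp_skb_cb, writers must keep it in sync through the helpers above. A hedged sketch of the kind of accounting involved (not the kernel's exact code):

/* Illustrative: (re)initialise the cached segment count for a queued skb. */
static void example_init_pcount(struct sk_buff *skb, unsigned int mss_now)
{
        if (skb->len <= mss_now)
                tcp_skb_pcount_set(skb, 1);
        else
                tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
}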
*/ @@ -755,8 +760,17 @@ enum tcp_ca_event { CA_EVENT_CWND_RESTART, /* congestion window restart */ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ CA_EVENT_LOSS, /* loss timeout */ - CA_EVENT_FAST_ACK, /* in sequence ack */ - CA_EVENT_SLOW_ACK, /* other ack */ + CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ + CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ + CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ + CA_EVENT_NON_DELAYED_ACK, +}; + +/* Information about inbound ACK, passed to cong_ops->in_ack_event() */ +enum tcp_ca_ack_event_flags { + CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */ + CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */ + CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */ }; /* @@ -766,7 +780,10 @@ enum tcp_ca_event { #define TCP_CA_MAX 128 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) +/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */ #define TCP_CONG_NON_RESTRICTED 0x1 +/* Requires ECN/ECT set on all packets */ +#define TCP_CONG_NEEDS_ECN 0x2 struct tcp_congestion_ops { struct list_head list; @@ -785,6 +802,8 @@ struct tcp_congestion_ops { void (*set_state)(struct sock *sk, u8 new_state); /* call when cwnd event occurs (optional) */ void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); + /* call when ack arrives (optional) */ + void (*in_ack_event)(struct sock *sk, u32 flags); /* new value of cwnd after loss (optional) */ u32 (*undo_cwnd)(struct sock *sk); /* hook for packet ack accounting (optional) */ @@ -799,6 +818,7 @@ struct tcp_congestion_ops { int tcp_register_congestion_control(struct tcp_congestion_ops *type); void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); +void tcp_assign_congestion_control(struct sock *sk); void tcp_init_congestion_control(struct sock *sk); void tcp_cleanup_congestion_control(struct sock *sk); int tcp_set_default_congestion_control(const char *name); @@ -807,14 +827,20 @@ void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); int tcp_set_congestion_control(struct sock *sk, const char *name); -int tcp_slow_start(struct tcp_sock *tp, u32 acked); +void tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); -extern struct tcp_congestion_ops tcp_init_congestion_ops; u32 tcp_reno_ssthresh(struct sock *sk); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); extern struct tcp_congestion_ops tcp_reno; +static inline bool tcp_ca_needs_ecn(const struct sock *sk) +{ + const struct inet_connection_sock *icsk = inet_csk(sk); + + return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN; +} + static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -1031,12 +1057,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) tp->ucopy.len = 0; tp->ucopy.memory = 0; skb_queue_head_init(&tp->ucopy.prequeue); -#ifdef CONFIG_NET_DMA - tp->ucopy.dma_chan = NULL; - tp->ucopy.wakeup = 0; - tp->ucopy.pinned_list = NULL; - tp->ucopy.dma_cookie = 0; -#endif } bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/udp.h b/include/net/udp.h index 70f941368ace..07f9b70962f6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr, void udp_set_csum(bool nocheck, struct sk_buff *skb, __be32 saddr, __be32 daddr, int len); 
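The new in_ack_event() hook, the ECN-flavoured CA events and the TCP_CONG_NEEDS_ECN flag are exactly the plumbing an ECN-reacting congestion control module needs. A skeleton of such a module (hypothetical; only fields declared in this header are used, with the reno helpers standing in for real cwnd logic):

static void example_in_ack_event(struct sock *sk, u32 flags)
{
        if (flags & CA_ACK_ECE) {
                /* the peer echoed an ECN mark: reduce aggressiveness here */
        }
}

static void example_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
        if (ev == CA_EVENT_ECN_IS_CE) {
                /* a CE-marked packet arrived */
        }
}

static struct tcp_congestion_ops example_ecn_cc __read_mostly = {
        .flags          = TCP_CONG_NEEDS_ECN,
        .in_ack_event   = example_in_ack_event,
        .cwnd_event     = example_cwnd_event,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .name           = "example_ecn",
        .owner          = THIS_MODULE,
};

static int __init example_ecn_register(void)
{
        return tcp_register_congestion_control(&example_ecn_cc);
}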
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct udphdr *uh); +int udp_gro_complete(struct sk_buff *skb, int nhoff); + +static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) +{ + struct udphdr *uh; + unsigned int hlen, off; + + off = skb_gro_offset(skb); + hlen = off + sizeof(*uh); + uh = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, hlen)) + uh = skb_gro_header_slow(skb, hlen, off); + + return uh; +} + /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ static inline void udp_lib_hash(struct sock *sk) { @@ -221,7 +239,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); int udp_disconnect(struct sock *sk, int flags); unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, - netdev_features_t features); + netdev_features_t features, + bool is_ipv6); int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udp_lib_setsockopt(struct sock *sk, int level, int optname, diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index ffd69cbded35..a47790bcaa38 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -1,6 +1,14 @@ #ifndef __NET_UDP_TUNNEL_H #define __NET_UDP_TUNNEL_H +#include <net/ip_tunnels.h> +#include <net/udp.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ipv6.h> +#include <net/addrconf.h> +#endif + struct udp_port_cfg { u8 family; @@ -26,7 +34,80 @@ struct udp_port_cfg { use_udp6_rx_checksums:1; }; -int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp); +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; /* user data used by encap_rcv call back */ + /* Used for setting up udp_sock fields, see udp.h for details */ + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; + udp_tunnel_encap_destroy_t encap_destroy; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. 
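udp_sock_create() plus setup_udp_tunnel_sock() above is the intended receive-side pairing for tunnel drivers. A sketch of the usual open path (hypothetical driver; struct fields not visible in this hunk, such as local_udp_port, are assumed from the full udp_port_cfg definition):

static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        /* decapsulate here; returning >0 would hand the skb back to UDP */
        kfree_skb(skb);
        return 0;
}

static int example_tunnel_open(struct net *net, __be16 port, void *priv,
                               struct socket **sockp)
{
        struct udp_port_cfg udp_conf = { };
        struct udp_tunnel_sock_cfg tunnel_cfg = { };
        int err;

        udp_conf.family = AF_INET;
        udp_conf.local_udp_port = port;         /* assumed field, not in the hunk */

        err = udp_sock_create(net, &udp_conf, sockp);
        if (err < 0)
                return err;

        tunnel_cfg.sk_user_data = priv;
        tunnel_cfg.encap_type = 1;              /* must match the rcv handler */
        tunnel_cfg.encap_rcv = example_encap_rcv;
        tunnel_cfg.encap_destroy = NULL;
        setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
        return 0;
}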
*/ +int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, + __be16 dst_port); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, + bool udp_csum) +{ + int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + + return iptunnel_handle_offloads(skb, udp_csum, type); +} + +static inline void udp_tunnel_encap_enable(struct socket *sock) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sock->sk->sk_family == PF_INET6) + ipv6_stub->udpv6_encap_enable(); + else +#endif + udp_encap_enable(); +} #endif diff --git a/include/net/wimax.h b/include/net/wimax.h index e52ef5357e08..c52b68577cb0 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -290,7 +290,7 @@ struct wimax_dev; * This operation has to be synchronous, and return only when the * reset is complete. In case of having had to resort to bus/cold * reset implying a device disconnection, the call is allowed to - * return inmediately. + * return immediately. * NOTE: wimax_dev->mutex is NOT locked when this op is being * called; however, wimax_dev->mutex_reset IS locked to ensure * serialization of calls to wimax_reset(). diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 721e9c3b11bd..dc4865e90fe4 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1591,6 +1591,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); +void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); int verify_spi_info(u8 proto, u32 min, u32 max); int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index 47da53c27ffa..79abb9c71772 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -8,6 +8,7 @@ #include <linux/tracepoint.h> #include <linux/edac.h> #include <linux/ktime.h> +#include <linux/pci.h> #include <linux/aer.h> #include <linux/cper.h> @@ -173,25 +174,34 @@ TRACE_EVENT(mc_event, * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED */ -#define aer_correctable_errors \ - {BIT(0), "Receiver Error"}, \ - {BIT(6), "Bad TLP"}, \ - {BIT(7), "Bad DLLP"}, \ - {BIT(8), "RELAY_NUM Rollover"}, \ - {BIT(12), "Replay Timer Timeout"}, \ - {BIT(13), "Advisory Non-Fatal"} - -#define aer_uncorrectable_errors \ - {BIT(4), "Data Link Protocol"}, \ - {BIT(12), "Poisoned TLP"}, \ - {BIT(13), "Flow Control Protocol"}, \ - {BIT(14), "Completion Timeout"}, \ - {BIT(15), "Completer Abort"}, \ - {BIT(16), "Unexpected Completion"}, \ - {BIT(17), "Receiver Overflow"}, \ - {BIT(18), "Malformed TLP"}, \ - {BIT(19), "ECRC"}, \ - {BIT(20), "Unsupported Request"} +#define aer_correctable_errors \ + {PCI_ERR_COR_RCVR, "Receiver Error"}, \ + {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \ + {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \ + {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \ + {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \ + {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \ + {PCI_ERR_COR_INTERNAL, "Corrected 
Internal Error"}, \ + {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"} + +#define aer_uncorrectable_errors \ + {PCI_ERR_UNC_UND, "Undefined"}, \ + {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \ + {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \ + {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \ + {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \ + {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \ + {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \ + {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \ + {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \ + {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \ + {PCI_ERR_UNC_ECRC, "ECRC Error"}, \ + {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \ + {PCI_ERR_UNC_ACSV, "ACS Violation"}, \ + {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\ + {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \ + {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \ + {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"} TRACE_EVENT(aer_event, TP_PROTO(const char *dev_name, diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 1ea0b65c4cfb..a2bf41e0bde9 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -47,6 +47,7 @@ struct ib_umem { int writable; int hugetlb; struct work_struct work; + struct pid *pid; struct mm_struct *mm; unsigned long diff; struct sg_table sg_head; diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h deleted file mode 100644 index 30d48f6da228..000000000000 --- a/include/rxrpc/types.h +++ /dev/null @@ -1,41 +0,0 @@ -/* types.h: Rx types - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
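On the transmit side, udp_tunnel_handle_offloads() is expected to be paired with the udp_tunnel_xmit_skb() declaration above; route lookup and header parameters stay the caller's responsibility. A hedged sketch (hypothetical wrapper; hard-coded tos/ttl values are illustrative):

/* Illustrative IPv4 transmit path for a UDP-encapsulated tunnel. */
static int example_tunnel_xmit(struct socket *sock, struct rtable *rt,
                               struct sk_buff *skb, __be32 src, __be32 dst,
                               __be16 src_port, __be16 dst_port, bool use_csum)
{
        skb = udp_tunnel_handle_offloads(skb, use_csum);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return udp_tunnel_xmit_skb(sock, rt, skb, src, dst,
                                   0 /* tos */, 64 /* ttl */, htons(IP_DF),
                                   src_port, dst_port, false /* xnet */);
}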
- */ - -#ifndef _LINUX_RXRPC_TYPES_H -#define _LINUX_RXRPC_TYPES_H - -#include <linux/types.h> -#include <linux/list.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/spinlock.h> -#include <linux/atomic.h> - -typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */ -typedef uint32_t rxrpc_serial_t; /* Rx message serial number */ -typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ -typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ - -struct rxrpc_call; -struct rxrpc_connection; -struct rxrpc_header; -struct rxrpc_message; -struct rxrpc_operation; -struct rxrpc_peer; -struct rxrpc_service; -typedef struct rxrpc_timer rxrpc_timer_t; -struct rxrpc_transport; - -typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call); - -#endif /* _LINUX_RXRPC_TYPES_H */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 261e708010da..d17178e6fcdd 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -333,6 +333,7 @@ static inline int scsi_status_is_good(int status) #define TYPE_RBC 0x0e #define TYPE_OSD 0x11 #define TYPE_ZBC 0x14 +#define TYPE_WLUN 0x1e /* well-known logical unit */ #define TYPE_NO_LUN 0x7f /* SCSI protocols; these are taken from SPC-3 section 7.5 */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1a0d1842962e..27ecee73bd72 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -174,6 +174,7 @@ struct scsi_device { unsigned wce_default_on:1; /* Cache is ON by default */ unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ unsigned broken_fua:1; /* Don't set FUA bit */ + unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index ba2034779961..5e362489ee88 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -606,7 +606,7 @@ struct Scsi_Host { /* * These three parameters can be used to allow for wide scsi, * and for host adapters that support multiple busses - * The first two should be set to 1 more than the actual max id + * The last two should be set to 1 more than the actual max id * or lun (e.g. 8 for SCSI parallel systems). 
*/ unsigned int max_channel; @@ -680,6 +680,7 @@ struct Scsi_Host { unsigned no_write_same:1; unsigned use_blk_mq:1; + unsigned use_cmd_list:1; /* * Optional work queue to be utilized by the transport @@ -692,6 +693,9 @@ struct Scsi_Host { */ struct workqueue_struct *tmf_work_q; + /* The transport requires the LUN bits NOT to be stored in CDB[1] */ + unsigned no_scsi2_lun_in_cdb:1; + /* * Value host_blocked counts down from */ diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index cdcc90b07ecb..e64583560701 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -68,7 +68,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) return; if (!shost_use_blk_mq(sdev->host) && - blk_queue_tagged(sdev->request_queue)) + !blk_queue_tagged(sdev->request_queue)) blk_queue_init_tags(sdev->request_queue, depth, sdev->host->bqt); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 6f3e10ca0e32..e862497f7556 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -183,6 +183,7 @@ struct snd_pcm_ops { #define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B) #define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) #define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) +#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE @@ -365,6 +366,7 @@ struct snd_pcm_runtime { struct snd_pcm_group { /* keep linked substreams */ spinlock_t lock; + struct mutex mutex; struct list_head substreams; int count; }; @@ -460,6 +462,7 @@ struct snd_pcm { void (*private_free) (struct snd_pcm *pcm); struct device *dev; /* actual hw device this belongs to */ bool internal; /* pcm is for internal use only */ + bool nonatomic; /* whole PCM operations are in non-atomic context */ #if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE) struct snd_pcm_oss oss; #endif @@ -492,8 +495,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree); * Native I/O */ -extern rwlock_t snd_pcm_link_rwlock; - int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info); int snd_pcm_info_user(struct snd_pcm_substream *substream, struct snd_pcm_info __user *info); @@ -537,41 +538,18 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) return substream->group != &substream->self_group; } -static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream) -{ - read_lock(&snd_pcm_link_rwlock); - spin_lock(&substream->self_group.lock); -} - -static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream) -{ - spin_unlock(&substream->self_group.lock); - read_unlock(&snd_pcm_link_rwlock); -} - -static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) -{ - read_lock_irq(&snd_pcm_link_rwlock); - spin_lock(&substream->self_group.lock); -} - -static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream) -{ - spin_unlock(&substream->self_group.lock); - read_unlock_irq(&snd_pcm_link_rwlock); -} - -#define snd_pcm_stream_lock_irqsave(substream, flags) \ -do { \ - read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \ - spin_lock(&substream->self_group.lock); \ -} while (0) - -#define snd_pcm_stream_unlock_irqrestore(substream, flags) \ -do { \ - spin_unlock(&substream->self_group.lock); \ - read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \ -} while (0) +void snd_pcm_stream_lock(struct snd_pcm_substream *substream); +void snd_pcm_stream_unlock(struct 
snd_pcm_substream *substream); +void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); +void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); +unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); +#define snd_pcm_stream_lock_irqsave(substream, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _snd_pcm_stream_lock_irqsave(substream); \ + } while (0) +void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, + unsigned long flags); #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h index 1de744c242f6..a5352712194b 100644 --- a/include/sound/rt5645.h +++ b/include/sound/rt5645.h @@ -20,6 +20,9 @@ struct rt5645_platform_data { /* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */ unsigned int dmic2_data_pin; /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */ + + unsigned int hp_det_gpio; + bool gpio_hp_det_active_high; }; #endif diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h index 3da14313bcfc..082670e3a353 100644 --- a/include/sound/rt5677.h +++ b/include/sound/rt5677.h @@ -12,10 +12,21 @@ #ifndef __LINUX_SND_RT5677_H #define __LINUX_SND_RT5677_H +enum rt5677_dmic2_clk { + RT5677_DMIC_CLK1 = 0, + RT5677_DMIC_CLK2 = 1, +}; + + struct rt5677_platform_data { - /* IN1 IN2 can optionally be differential */ + /* IN1/IN2/LOUT1/LOUT2/LOUT3 can optionally be differential */ bool in1_diff; bool in2_diff; + bool lout1_diff; + bool lout2_diff; + bool lout3_diff; + /* DMIC2 clock source selection */ + enum rt5677_dmic2_clk dmic2_clk_pin; }; #endif diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index aac04ff84eea..3a4d7da67b8d 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -432,6 +432,7 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin); void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card); +unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol); /* Mostly internal - should not normally be used */ void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm); @@ -587,13 +588,13 @@ struct snd_soc_dapm_context { enum snd_soc_bias_level suspend_bias_level; struct delayed_work delayed_work; unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */ - + /* Go to BIAS_OFF in suspend if the DAPM context is idle */ + unsigned int suspend_bias_off:1; void (*seq_notifier)(struct snd_soc_dapm_context *, enum snd_soc_dapm_type, int); struct device *dev; /* from parent - for debug */ struct snd_soc_component *component; /* parent component */ - struct snd_soc_codec *codec; /* parent codec */ struct snd_soc_card *card; /* parent card */ /* used during DAPM updates */ diff --git a/include/sound/soc.h b/include/sound/soc.h index c83a334dd00f..7ba7130037a0 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -690,6 +690,17 @@ struct snd_soc_compr_ops { struct snd_soc_component_driver { const char *name; + /* Default control and setup, added after probe() is run */ + const struct snd_kcontrol_new *controls; + unsigned int num_controls; + const struct snd_soc_dapm_widget *dapm_widgets; + unsigned int num_dapm_widgets; + const struct snd_soc_dapm_route *dapm_routes; + unsigned int num_dapm_routes; + + int (*probe)(struct snd_soc_component *); + void (*remove)(struct snd_soc_component *); + /* DT */ int 
(*of_xlate_dai_name)(struct snd_soc_component *component, struct of_phandle_args *args, @@ -697,6 +708,10 @@ struct snd_soc_component_driver { void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type, int subseq); int (*stream_event)(struct snd_soc_component *, int event); + + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; }; struct snd_soc_component { @@ -710,6 +725,7 @@ struct snd_soc_component { unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */ unsigned int registered_as_component:1; + unsigned int probed:1; struct list_head list; @@ -728,9 +744,35 @@ struct snd_soc_component { struct mutex io_mutex; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; +#endif + + /* + * DO NOT use any of the fields below in drivers, they are temporary and + * are going to be removed again soon. If you use them in driver code the + * driver will be marked as BROKEN when these fields are removed. + */ + /* Don't use these, use snd_soc_component_get_dapm() */ struct snd_soc_dapm_context dapm; struct snd_soc_dapm_context *dapm_ptr; + + const struct snd_kcontrol_new *controls; + unsigned int num_controls; + const struct snd_soc_dapm_widget *dapm_widgets; + unsigned int num_dapm_widgets; + const struct snd_soc_dapm_route *dapm_routes; + unsigned int num_dapm_routes; + struct snd_soc_codec *codec; + + int (*probe)(struct snd_soc_component *); + void (*remove)(struct snd_soc_component *); + +#ifdef CONFIG_DEBUG_FS + void (*init_debugfs)(struct snd_soc_component *component); + const char *debugfs_prefix; +#endif }; /* SoC Audio Codec device */ @@ -746,11 +788,9 @@ struct snd_soc_codec { struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */ unsigned int cache_bypass:1; /* Suppress access to the cache */ unsigned int suspended:1; /* Codec is in suspend PM state */ - unsigned int probed:1; /* Codec has been probed */ unsigned int ac97_registered:1; /* Codec has been AC97 registered */ unsigned int ac97_created:1; /* Codec has been created by SoC */ unsigned int cache_init:1; /* codec cache has been initialized */ - u32 cache_only; /* Suppress writes to hardware */ u32 cache_sync; /* Cache needs to be synced to hardware */ /* codec IO */ @@ -766,7 +806,6 @@ struct snd_soc_codec { struct snd_soc_dapm_context dapm; #ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_codec_root; struct dentry *debugfs_reg; #endif }; @@ -808,15 +847,12 @@ struct snd_soc_codec_driver { int (*set_bias_level)(struct snd_soc_codec *, enum snd_soc_bias_level level); bool idle_bias_off; + bool suspend_bias_off; void (*seq_notifier)(struct snd_soc_dapm_context *, enum snd_soc_dapm_type, int); bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */ - - /* probe ordering - for components with runtime dependencies */ - int probe_order; - int remove_order; }; /* SoC platform interface */ @@ -832,14 +868,6 @@ struct snd_soc_platform_driver { int (*pcm_new)(struct snd_soc_pcm_runtime *); void (*pcm_free)(struct snd_pcm *); - /* Default control and setup, added after probe() is run */ - const struct snd_kcontrol_new *controls; - int num_controls; - const struct snd_soc_dapm_widget *dapm_widgets; - int num_dapm_widgets; - const struct snd_soc_dapm_route *dapm_routes; - int num_dapm_routes; - /* * For platform caused delay reporting. * Optional. 
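The snd_soc_component_driver above now carries default controls, DAPM widgets/routes, its own probe()/remove() and probe ordering, all of which previously lived in the codec and platform driver structs. A minimal, hypothetical sketch of how a driver might fill in the new fields; the register address and every foo_* name is invented for illustration, not taken from the patch:

#include <sound/soc.h>

/* one invented volume control; 0x02 is a made-up register */
static const struct snd_kcontrol_new foo_controls[] = {
	SOC_SINGLE("Master Playback Volume", 0x02, 0, 255, 0),
};

static int foo_component_probe(struct snd_soc_component *component)
{
	/* one-time component setup; the default controls above are
	 * registered by the core after this returns */
	return 0;
}

static const struct snd_soc_component_driver foo_component_driver = {
	.name		= "foo",
	.probe		= foo_component_probe,
	.controls	= foo_controls,
	.num_controls	= ARRAY_SIZE(foo_controls),
};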
@@ -853,13 +881,6 @@ struct snd_soc_platform_driver { /* platform stream compress ops */ const struct snd_compr_ops *compr_ops; - /* probe ordering - for components with runtime dependencies */ - int probe_order; - int remove_order; - - /* platform IO - used for platform DAPM */ - unsigned int (*read)(struct snd_soc_platform *, unsigned int); - int (*write)(struct snd_soc_platform *, unsigned int, unsigned int); int (*bespoke_trigger)(struct snd_pcm_substream *, int); }; @@ -874,15 +895,10 @@ struct snd_soc_platform { const struct snd_soc_platform_driver *driver; unsigned int suspended:1; /* platform is suspended */ - unsigned int probed:1; struct list_head list; struct snd_soc_component component; - -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_platform_root; -#endif }; struct snd_soc_dai_link { @@ -897,7 +913,7 @@ struct snd_soc_dai_link { * only for codec to codec links, or systems using device tree. */ const char *cpu_name; - const struct device_node *cpu_of_node; + struct device_node *cpu_of_node; /* * You MAY specify the DAI name of the CPU DAI. If this information is * omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node @@ -909,7 +925,7 @@ struct snd_soc_dai_link { * DT/OF node, but not both. */ const char *codec_name; - const struct device_node *codec_of_node; + struct device_node *codec_of_node; /* You MUST specify the DAI name within the codec */ const char *codec_dai_name; @@ -922,7 +938,7 @@ struct snd_soc_dai_link { * do not need a platform. */ const char *platform_name; - const struct device_node *platform_of_node; + struct device_node *platform_of_node; int be_id; /* optional ID for machine driver BE identification */ const struct snd_soc_pcm_stream *params; @@ -994,7 +1010,7 @@ struct snd_soc_aux_dev { const struct device_node *codec_of_node; /* codec/machine specific init - e.g. 
add machine controls */ - int (*init)(struct snd_soc_dapm_context *dapm); + int (*init)(struct snd_soc_component *component); }; /* SoC card */ @@ -1112,6 +1128,7 @@ struct snd_soc_pcm_runtime { struct snd_soc_platform *platform; struct snd_soc_dai *codec_dai; struct snd_soc_dai *cpu_dai; + struct snd_soc_component *component; /* Only valid for AUX dev rtds */ struct snd_soc_dai **codec_dais; unsigned int num_codecs; @@ -1260,9 +1277,6 @@ void snd_soc_component_async_complete(struct snd_soc_component *component); int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value); -int snd_soc_component_init_io(struct snd_soc_component *component, - struct regmap *regmap); - /* device driver data */ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, @@ -1276,26 +1290,37 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card) return card->drvdata; } +static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c, + void *data) +{ + dev_set_drvdata(c->dev, data); +} + +static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c) +{ + return dev_get_drvdata(c->dev); +} + static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec, void *data) { - dev_set_drvdata(codec->dev, data); + snd_soc_component_set_drvdata(&codec->component, data); } static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec) { - return dev_get_drvdata(codec->dev); + return snd_soc_component_get_drvdata(&codec->component); } static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform, void *data) { - dev_set_drvdata(platform->dev, data); + snd_soc_component_set_drvdata(&platform->component, data); } static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform) { - return dev_get_drvdata(platform->dev); + return snd_soc_component_get_drvdata(&platform->component); } static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd, diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h index f634f8f85db5..cae9c9d4ef22 100644 --- a/include/sound/vx_core.h +++ b/include/sound/vx_core.h @@ -80,8 +80,6 @@ struct vx_pipe { unsigned int references; /* an output pipe may be used for monitoring and/or playback */ struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/ - - struct tasklet_struct start_tq; }; struct vx_core; @@ -165,9 +163,7 @@ struct vx_core { struct snd_vx_hardware *hw; struct snd_vx_ops *ops; - spinlock_t lock; - spinlock_t irq_lock; - struct tasklet_struct tq; + struct mutex lock; unsigned int chip_status; unsigned int pcm_running; @@ -223,6 +219,7 @@ void snd_vx_free_firmware(struct vx_core *chip); * interrupt handler; exported for pcmcia */ irqreturn_t snd_vx_irq_handler(int irq, void *dev); +irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev); /* * lowlevel functions diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index 0194a641e4e2..b04ee7e5a466 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -175,7 +175,7 @@ TRACE_EVENT(snd_soc_dapm_output_path, __entry->path_sink = (long)path->sink; ), - TP_printk("%c%s -> %s -> %s\n", + TP_printk("%c%s -> %s -> %s", (int) __entry->path_sink && (int) __entry->path_connect ? 
'*' : ' ', __get_str(wname), __get_str(pname), __get_str(psname)) @@ -204,7 +204,7 @@ TRACE_EVENT(snd_soc_dapm_input_path, __entry->path_source = (long)path->source; ), - TP_printk("%c%s <- %s <- %s\n", + TP_printk("%c%s <- %s <- %s", (int) __entry->path_source && (int) __entry->path_connect ? '*' : ' ', __get_str(wname), __get_str(pname), __get_str(psname)) @@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_dapm_connected, __entry->stream = stream; ), - TP_printk("%s: found %d paths\n", + TP_printk("%s: found %d paths", __entry->stream ? "capture" : "playback", __entry->paths) ); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 4ee4e30d26d9..1faecea101f3 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -23,6 +23,7 @@ struct map_lookup; struct extent_buffer; struct btrfs_work; struct __btrfs_workqueue; +struct btrfs_qgroup_operation; #define show_ref_type(type) \ __print_symbolic(type, \ @@ -157,12 +158,13 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, #define show_map_flags(flag) \ __print_flags(flag, "|", \ - { EXTENT_FLAG_PINNED, "PINNED" }, \ - { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \ - { EXTENT_FLAG_VACANCY, "VACANCY" }, \ - { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \ - { EXTENT_FLAG_LOGGING, "LOGGING" }, \ - { EXTENT_FLAG_FILLING, "FILLING" }) + { (1 << EXTENT_FLAG_PINNED), "PINNED" },\ + { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\ + { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\ + { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\ + { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\ + { (1 << EXTENT_FLAG_FILLING), "FILLING" },\ + { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" }) TRACE_EVENT_CONDITION(btrfs_get_extent, @@ -996,6 +998,7 @@ DECLARE_EVENT_CLASS(btrfs__work, __field( void *, func ) __field( void *, ordered_func ) __field( void *, ordered_free ) + __field( void *, normal_work ) ), TP_fast_assign( @@ -1004,11 +1007,13 @@ DECLARE_EVENT_CLASS(btrfs__work, __entry->func = work->func; __entry->ordered_func = work->ordered_func; __entry->ordered_free = work->ordered_free; + __entry->normal_work = &work->normal_work; ), - TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p", - __entry->work, __entry->wq, __entry->func, - __entry->ordered_func, __entry->ordered_free) + TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p," + " ordered_free=%p", + __entry->work, __entry->normal_work, __entry->wq, + __entry->func, __entry->ordered_func, __entry->ordered_free) ); /* For situiations that the work is freed */ @@ -1043,13 +1048,6 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched, TP_ARGS(work) ); -DEFINE_EVENT(btrfs__work, btrfs_normal_work_done, - - TP_PROTO(struct btrfs_work *work), - - TP_ARGS(work) -); - DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, TP_PROTO(struct btrfs_work *work), @@ -1119,6 +1117,61 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy, TP_ARGS(wq) ); +#define show_oper_type(type) \ + __print_symbolic(type, \ + { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \ + { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \ + { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \ + { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" }) + +DECLARE_EVENT_CLASS(btrfs_qgroup_oper, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper), + + TP_STRUCT__entry( + __field( u64, ref_root ) + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( u64, seq ) + __field( int, type ) + __field( u64, elem_seq ) + ), + + TP_fast_assign( + __entry->ref_root = oper->ref_root; + 
__entry->bytenr = oper->bytenr, + __entry->num_bytes = oper->num_bytes; + __entry->seq = oper->seq; + __entry->type = oper->type; + __entry->elem_seq = oper->elem.seq; + ), + + TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, " + "seq = %llu, elem.seq = %llu, type = %s", + (unsigned long long)__entry->ref_root, + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes, + (unsigned long long)__entry->seq, + (unsigned long long)__entry->elem_seq, + show_oper_type(__entry->type)) +); + +DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper) +); + +DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref, + + TP_PROTO(struct btrfs_qgroup_operation *oper), + + TP_ARGS(oper) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index d06d44363fea..bbc4de9baef7 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -69,6 +69,12 @@ { GC_GREEDY, "Greedy" }, \ { GC_CB, "Cost-Benefit" }) +#define show_cpreason(type) \ + __print_symbolic(type, \ + { CP_UMOUNT, "Umount" }, \ + { CP_SYNC, "Sync" }, \ + { CP_DISCARD, "Discard" }) + struct victim_sel_policy; DECLARE_EVENT_CLASS(f2fs__inode, @@ -944,25 +950,25 @@ TRACE_EVENT(f2fs_submit_page_mbio, TRACE_EVENT(f2fs_write_checkpoint, - TP_PROTO(struct super_block *sb, bool is_umount, char *msg), + TP_PROTO(struct super_block *sb, int reason, char *msg), - TP_ARGS(sb, is_umount, msg), + TP_ARGS(sb, reason, msg), TP_STRUCT__entry( __field(dev_t, dev) - __field(bool, is_umount) + __field(int, reason) __field(char *, msg) ), TP_fast_assign( __entry->dev = sb->s_dev; - __entry->is_umount = is_umount; + __entry->reason = reason; __entry->msg = msg; ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", show_dev(__entry), - __entry->is_umount ? "clean umount" : "consistency", + show_cpreason(__entry->reason), __entry->msg) ); diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index 59d11c22f076..a0d008070962 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -53,15 +53,15 @@ DECLARE_EVENT_CLASS(filelock_lease, ), TP_fast_assign( - __entry->fl = fl; + __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; - __entry->fl_next = fl->fl_next; - __entry->fl_owner = fl->fl_owner; - __entry->fl_flags = fl->fl_flags; - __entry->fl_type = fl->fl_type; - __entry->fl_break_time = fl->fl_break_time; - __entry->fl_downgrade_time = fl->fl_downgrade_time; + __entry->fl_next = fl ? fl->fl_next : NULL; + __entry->fl_owner = fl ? fl->fl_owner : NULL; + __entry->fl_flags = fl ? fl->fl_flags : 0; + __entry->fl_type = fl ? fl->fl_type : 0; + __entry->fl_break_time = fl ? fl->fl_break_time : 0; + __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0; ), TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 1c09820df585..3608bebd3d9c 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq, * @vec_nr: softirq vector number * * When used in combination with the softirq_exit tracepoint - * we can determine the softirq handler runtine. + * we can determine the softirq handler routine. 
*/ DEFINE_EVENT(softirq, softirq_entry, @@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry, * @vec_nr: softirq vector number * * When used in combination with the softirq_entry tracepoint - * we can determine the softirq handler runtine. + * we can determine the softirq handler routine. */ DEFINE_EVENT(softirq, softirq_exit, diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 908925ace776..6edf1f2028cd 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -95,6 +95,26 @@ TRACE_EVENT(kvm_ioapic_set_irq, __entry->coalesced ? " (coalesced)" : "") ); +TRACE_EVENT(kvm_ioapic_delayed_eoi_inj, + TP_PROTO(__u64 e), + TP_ARGS(e), + + TP_STRUCT__entry( + __field( __u64, e ) + ), + + TP_fast_assign( + __entry->e = e; + ), + + TP_printk("dst %x vec=%u (%s|%s|%s%s)", + (u8)(__entry->e >> 56), (u8)__entry->e, + __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode), + (__entry->e & (1<<11)) ? "logical" : "physical", + (__entry->e & (1<<15)) ? "level" : "edge", + (__entry->e & (1<<16)) ? "|masked" : "") +); + TRACE_EVENT(kvm_msi_set_irq, TP_PROTO(__u64 address, __u64 data), TP_ARGS(address, data), @@ -205,24 +225,26 @@ TRACE_EVENT(kvm_fpu, ); TRACE_EVENT(kvm_age_page, - TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref), - TP_ARGS(hva, slot, ref), + TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref), + TP_ARGS(gfn, level, slot, ref), TP_STRUCT__entry( __field( u64, hva ) __field( u64, gfn ) + __field( u8, level ) __field( u8, referenced ) ), TP_fast_assign( - __entry->hva = hva; - __entry->gfn = - slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT); + __entry->gfn = gfn; + __entry->level = level; + __entry->hva = ((gfn - slot->base_gfn) << + PAGE_SHIFT) + slot->userspace_addr; __entry->referenced = ref; ), - TP_printk("hva %llx gfn %llx %s", - __entry->hva, __entry->gfn, + TP_printk("hva %llx gfn %llx level %u %s", + __entry->hva, __entry->gfn, __entry->level, __entry->referenced ? "YOUNG" : "OLD") ); diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index aca382266411..9b56f37148cf 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -180,9 +180,12 @@ TRACE_EVENT(rcu_grace_period_init, * argument is a string as follows: * * "WakeEmpty": Wake rcuo kthread, first CB to empty list. + * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list. * "WakeOvf": Wake rcuo kthread, CB list is huge. + * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge. * "WakeNot": Don't wake rcuo kthread. * "WakeNotPoll": Don't wake rcuo kthread because it is polling. + * "DeferredWake": Carried out the "IsDeferred" wakeup. * "Poll": Start of new polling cycle for rcu_nocb_poll. * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. * "WokeEmpty": rcuo kthread woke to find empty list. 
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild index 81d2106287fe..245aa6e05e6a 100644 --- a/include/uapi/Kbuild +++ b/include/uapi/Kbuild @@ -12,3 +12,4 @@ header-y += video/ header-y += drm/ header-y += xen/ header-y += scsi/ +header-y += misc/ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 11d11bc5c78f..22749c134117 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -705,9 +705,11 @@ __SYSCALL(__NR_seccomp, sys_seccomp) __SYSCALL(__NR_getrandom, sys_getrandom) #define __NR_memfd_create 279 __SYSCALL(__NR_memfd_create, sys_memfd_create) +#define __NR_bpf 280 +__SYSCALL(__NR_bpf, sys_bpf) #undef __NR_syscalls -#define __NR_syscalls 280 +#define __NR_syscalls 281 /* * All syscalls below here should go away really, diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 24e9033f8b3f..3cc8e1c2b996 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -67,6 +67,7 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h @@ -240,6 +241,7 @@ header-y += matroxfb.h header-y += mdio.h header-y += media.h header-y += mei.h +header-y += memfd.h header-y += mempolicy.h header-y += meye.h header-y += mic_common.h @@ -353,6 +355,7 @@ header-y += serio.h header-y += shm.h header-y += signal.h header-y += signalfd.h +header-y += smiapp.h header-y += snmp.h header-y += sock_diag.h header-y += socket.h @@ -395,6 +398,7 @@ header-y += un.h header-y += unistd.h header-y += unix_diag.h header-y += usbdevice_fs.h +header-y += usbip.h header-y += utime.h header-y += utsname.h header-y += uuid.h diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h new file mode 100644 index 000000000000..31b0ac208a52 --- /dev/null +++ b/include/uapi/linux/bpf.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _UAPI__LINUX_BPF_H__ +#define _UAPI__LINUX_BPF_H__ + +#include <linux/types.h> + +/* Extended instruction set based on top of classic BPF */ + +/* instruction classes */ +#define BPF_ALU64 0x07 /* alu mode in double word width */ + +/* ld/ldx fields */ +#define BPF_DW 0x18 /* double word */ +#define BPF_XADD 0xc0 /* exclusive add */ + +/* alu/jmp fields */ +#define BPF_MOV 0xb0 /* mov reg to reg */ +#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ + +/* change endianness of a register */ +#define BPF_END 0xd0 /* flags for endianness conversion: */ +#define BPF_TO_LE 0x00 /* convert to little-endian */ +#define BPF_TO_BE 0x08 /* convert to big-endian */ +#define BPF_FROM_LE BPF_TO_LE +#define BPF_FROM_BE BPF_TO_BE + +#define BPF_JNE 0x50 /* jump != */ +#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ +#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_CALL 0x80 /* function call */ +#define BPF_EXIT 0x90 /* function return */ + +/* Register numbers */ +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + +/* BPF has 10 general purpose 64-bit registers and stack frame. 
*/ +#define MAX_BPF_REG __MAX_BPF_REG + +struct bpf_insn { + __u8 code; /* opcode */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ + __s16 off; /* signed offset */ + __s32 imm; /* signed immediate constant */ +}; + +/* BPF syscall commands */ +enum bpf_cmd { + /* create a map with given type and attributes + * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size) + * returns fd or negative error + * map is deleted when fd is closed + */ + BPF_MAP_CREATE, + + /* lookup key in a given map + * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->value + * returns zero and stores found elem into value + * or negative error + */ + BPF_MAP_LOOKUP_ELEM, + + /* create or update key/value pair in a given map + * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->value + * returns zero or negative error + */ + BPF_MAP_UPDATE_ELEM, + + /* find and delete elem by key in a given map + * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key + * returns zero or negative error + */ + BPF_MAP_DELETE_ELEM, + + /* lookup key in a given map and return next key + * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size) + * Using attr->map_fd, attr->key, attr->next_key + * returns zero and stores next key or negative error + */ + BPF_MAP_GET_NEXT_KEY, + + /* verify and load eBPF program + * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size) + * Using attr->prog_type, attr->insns, attr->license + * returns fd or negative error + */ + BPF_PROG_LOAD, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC, +}; + +union bpf_attr { + struct { /* anonymous struct used by BPF_MAP_CREATE command */ + __u32 map_type; /* one of enum bpf_map_type */ + __u32 key_size; /* size of key in bytes */ + __u32 value_size; /* size of value in bytes */ + __u32 max_entries; /* max number of entries in a map */ + }; + + struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ + __u32 map_fd; + __aligned_u64 key; + union { + __aligned_u64 value; + __aligned_u64 next_key; + }; + }; + + struct { /* anonymous struct used by BPF_PROG_LOAD command */ + __u32 prog_type; /* one of enum bpf_prog_type */ + __u32 insn_cnt; + __aligned_u64 insns; + __aligned_u64 license; + __u32 log_level; /* verbosity level of verifier */ + __u32 log_size; /* size of user buffer */ + __aligned_u64 log_buf; /* user supplied buffer */ + }; +} __attribute__((aligned(8))); + +/* integer value in 'imm' field of BPF_CALL instruction selects which helper + * function eBPF program intends to call + */ +enum bpf_func_id { + BPF_FUNC_unspec, + __BPF_FUNC_MAX_ID, +}; + +#endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index e3c7a719c76b..99b43056a6fe 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -209,6 +209,33 @@ struct ethtool_value { __u32 data; }; +enum tunable_id { + ETHTOOL_ID_UNSPEC, + ETHTOOL_RX_COPYBREAK, + ETHTOOL_TX_COPYBREAK, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC, + ETHTOOL_TUNABLE_U8, + ETHTOOL_TUNABLE_U16, + ETHTOOL_TUNABLE_U32, + ETHTOOL_TUNABLE_U64, + ETHTOOL_TUNABLE_STRING, + ETHTOOL_TUNABLE_S8, + ETHTOOL_TUNABLE_S16, + ETHTOOL_TUNABLE_S32, + ETHTOOL_TUNABLE_S64, +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + /** * struct ethtool_regs - 
hardware register dump * @cmd: Command number = %ETHTOOL_GREGS @@ -1152,6 +1179,8 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ #define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ +#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */ +#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h new file mode 100644 index 000000000000..8df06894da23 --- /dev/null +++ b/include/uapi/linux/fou.h @@ -0,0 +1,39 @@ +/* fou.h - FOU Interface */ + +#ifndef _UAPI_LINUX_FOU_H +#define _UAPI_LINUX_FOU_H + +/* NETLINK_GENERIC related info + */ +#define FOU_GENL_NAME "fou" +#define FOU_GENL_VERSION 0x1 + +enum { + FOU_ATTR_UNSPEC, + FOU_ATTR_PORT, /* u16 */ + FOU_ATTR_AF, /* u8 */ + FOU_ATTR_IPPROTO, /* u8 */ + FOU_ATTR_TYPE, /* u8 */ + + __FOU_ATTR_MAX, +}; + +#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1) + +enum { + FOU_CMD_UNSPEC, + FOU_CMD_ADD, + FOU_CMD_DEL, + + __FOU_CMD_MAX, +}; + +enum { + FOU_ENCAP_UNSPEC, + FOU_ENCAP_DIRECT, + FOU_ENCAP_GUE, +}; + +#define FOU_CMD_MAX (__FOU_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_FOU_H */ diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h index 4fc065f29255..baa93fb4cd4f 100644 --- a/include/uapi/linux/genwqe/genwqe_card.h +++ b/include/uapi/linux/genwqe/genwqe_card.h @@ -8,7 +8,7 @@ * * Author: Frank Haverkamp <haver@linux.vnet.ibm.com> * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com> - * Author: Michael Jung <mijung@de.ibm.com> + * Author: Michael Jung <mijung@gmx.net> * Author: Michael Ruettger <michael@ibmra.de> * * This program is free software; you can redistribute it and/or modify diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index 78e4a86030dd..0a8e6badb29b 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -137,7 +137,7 @@ struct hv_do_fcopy { __u64 offset; __u32 size; __u8 data[DATA_FRAGMENT]; -}; +} __attribute__((packed)); /* * An implementation of HyperV key value pair (KVP) functionality for Linux. diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 0f8210b8e0bc..aa63ed023c2b 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -128,6 +128,7 @@ #define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ #define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ #define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ +#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */ /* * This is an Ethernet frame header. 
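The new include/uapi/linux/bpf.h a few hunks above defines the extended-BPF syscall ABI: the bpf_cmd commands, union bpf_attr and the eBPF instruction encoding. A hypothetical userspace sketch of the calling convention; there is no libc wrapper, so the raw syscall number is used, and since only the *_UNSPEC map and program types exist at this point the attribute values below are placeholders rather than a working configuration:

#define _GNU_SOURCE
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* thin wrapper around the new multiplexing syscall */
static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused fields must be zero */
	attr.map_type    = BPF_MAP_TYPE_UNSPEC;	/* placeholder map type */
	attr.key_size    = sizeof(uint32_t);
	attr.value_size  = sizeof(uint64_t);
	attr.max_entries = 16;

	/* returns a map fd on success, -1 with errno set on failure */
	long fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));

	return fd < 0 ? 1 : 0;
}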
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ff957604a721..0bdb77e16875 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -215,6 +215,18 @@ enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_NONE, }; +/* Bridge section */ + +enum { + IFLA_BR_UNSPEC, + IFLA_BR_FORWARD_DELAY, + IFLA_BR_HELLO_TIME, + IFLA_BR_MAX_AGE, + __IFLA_BR_MAX, +}; + +#define IFLA_BR_MAX (__IFLA_BR_MAX - 1) + enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, @@ -291,6 +303,10 @@ enum { IFLA_MACVLAN_UNSPEC, IFLA_MACVLAN_MODE, IFLA_MACVLAN_FLAGS, + IFLA_MACVLAN_MACADDR_MODE, + IFLA_MACVLAN_MACADDR, + IFLA_MACVLAN_MACADDR_DATA, + IFLA_MACVLAN_MACADDR_COUNT, __IFLA_MACVLAN_MAX, }; @@ -301,6 +317,14 @@ enum macvlan_mode { MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */ MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ + MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assign */ +}; + +enum macvlan_macaddr_mode { + MACVLAN_MACADDR_ADD, + MACVLAN_MACADDR_DEL, + MACVLAN_MACADDR_FLUSH, + MACVLAN_MACADDR_SET, }; #define MACVLAN_FLAG_NOPROMISC 1 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 3bce9e9d9f7c..280d9e092283 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -53,10 +53,23 @@ enum { IFLA_IPTUN_6RD_RELAY_PREFIX, IFLA_IPTUN_6RD_PREFIXLEN, IFLA_IPTUN_6RD_RELAY_PREFIXLEN, + IFLA_IPTUN_ENCAP_TYPE, + IFLA_IPTUN_ENCAP_FLAGS, + IFLA_IPTUN_ENCAP_SPORT, + IFLA_IPTUN_ENCAP_DPORT, __IFLA_IPTUN_MAX, }; #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE, + TUNNEL_ENCAP_FOU, + TUNNEL_ENCAP_GUE, +}; + +#define TUNNEL_ENCAP_FLAG_CSUM (1<<0) +#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) + /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 @@ -94,6 +107,10 @@ enum { IFLA_GRE_ENCAP_LIMIT, IFLA_GRE_FLOWINFO, IFLA_GRE_FLAGS, + IFLA_GRE_ENCAP_TYPE, + IFLA_GRE_ENCAP_FLAGS, + IFLA_GRE_ENCAP_SPORT, + IFLA_GRE_ENCAP_DPORT, __IFLA_GRE_MAX, }; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index bbde90fa5838..d65c0a09efd3 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -110,10 +110,10 @@ enum { INET_DIAG_TCLASS, INET_DIAG_SKMEMINFO, INET_DIAG_SHUTDOWN, + INET_DIAG_DCTCPINFO, }; -#define INET_DIAG_MAX INET_DIAG_SHUTDOWN - +#define INET_DIAG_MAX INET_DIAG_DCTCPINFO /* INET_DIAG_MEM */ @@ -133,5 +133,14 @@ struct tcpvegas_info { __u32 tcpv_minrtt; }; +/* INET_DIAG_DCTCPINFO */ + +struct tcp_dctcp_info { + __u16 dctcp_enabled; + __u16 dctcp_ce_state; + __u32 dctcp_alpha; + __u32 dctcp_ab_ecn; + __u32 dctcp_ab_tot; +}; #endif /* _UAPI_INET_DIAG_H_ */ diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 19df18c9b8be..1874ebe9ac1e 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -165,6 +165,7 @@ struct input_keymap_entry { #define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ #define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ #define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */ +#define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */ #define INPUT_PROP_MAX 0x1f #define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index fbcffe8041f7..cabe95d5b461 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -384,6 +384,9 @@ enum { IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent 
connections */ IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ + + IPVS_DEST_ATTR_ADDR_FAMILY, /* Address family of address */ + __IPVS_DEST_ATTR_MAX, }; diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h index 5116a0e48172..2f96d233c980 100644 --- a/include/uapi/linux/kernel-page-flags.h +++ b/include/uapi/linux/kernel-page-flags.h @@ -31,6 +31,7 @@ #define KPF_KSM 21 #define KPF_THP 22 +#define KPF_BALLOON 23 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index cf3a2ff440e4..60768822b140 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -654,9 +654,7 @@ struct kvm_ppc_smmu_info { #endif /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 -#ifdef __KVM_HAVE_USER_NMI #define KVM_CAP_USER_NMI 22 -#endif #ifdef __KVM_HAVE_GUEST_DEBUG #define KVM_CAP_SET_GUEST_DEBUG 23 #endif @@ -738,9 +736,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_PPC_GET_SMMU_INFO 78 #define KVM_CAP_S390_COW 79 #define KVM_CAP_PPC_ALLOC_HTAB 80 -#ifdef __KVM_HAVE_READONLY_MEM #define KVM_CAP_READONLY_MEM 81 -#endif #define KVM_CAP_IRQFD_RESAMPLE 82 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83 #define KVM_CAP_PPC_HTAB_FD 84 @@ -947,15 +943,25 @@ struct kvm_device_attr { __u64 addr; /* userspace address of attr data */ }; -#define KVM_DEV_TYPE_FSL_MPIC_20 1 -#define KVM_DEV_TYPE_FSL_MPIC_42 2 -#define KVM_DEV_TYPE_XICS 3 -#define KVM_DEV_TYPE_VFIO 4 #define KVM_DEV_VFIO_GROUP 1 #define KVM_DEV_VFIO_GROUP_ADD 1 #define KVM_DEV_VFIO_GROUP_DEL 2 -#define KVM_DEV_TYPE_ARM_VGIC_V2 5 -#define KVM_DEV_TYPE_FLIC 6 + +enum kvm_device_type { + KVM_DEV_TYPE_FSL_MPIC_20 = 1, +#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20 + KVM_DEV_TYPE_FSL_MPIC_42, +#define KVM_DEV_TYPE_FSL_MPIC_42 KVM_DEV_TYPE_FSL_MPIC_42 + KVM_DEV_TYPE_XICS, +#define KVM_DEV_TYPE_XICS KVM_DEV_TYPE_XICS + KVM_DEV_TYPE_VFIO, +#define KVM_DEV_TYPE_VFIO KVM_DEV_TYPE_VFIO + KVM_DEV_TYPE_ARM_VGIC_V2, +#define KVM_DEV_TYPE_ARM_VGIC_V2 KVM_DEV_TYPE_ARM_VGIC_V2 + KVM_DEV_TYPE_FLIC, +#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC + KVM_DEV_TYPE_MAX, +}; /* * ioctls for VM fds @@ -1093,7 +1099,7 @@ struct kvm_s390_ucas_mapping { #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) -/* Available with KVM_CAP_NMI */ +/* Available with KVM_CAP_USER_NMI */ #define KVM_NMI _IO(KVMIO, 0x9a) /* Available with KVM_CAP_SET_GUEST_DEBUG */ #define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index 78c2f2e79920..ca03119111a2 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -115,6 +115,9 @@ enum { IPSET_ATTR_BYTES, IPSET_ATTR_PACKETS, IPSET_ATTR_COMMENT, + IPSET_ATTR_SKBMARK, + IPSET_ATTR_SKBPRIO, + IPSET_ATTR_SKBQUEUE, __IPSET_ATTR_ADT_MAX, }; #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) @@ -147,6 +150,7 @@ enum ipset_errno { IPSET_ERR_COUNTER, IPSET_ERR_COMMENT, IPSET_ERR_INVALID_MARKMASK, + IPSET_ERR_SKBINFO, /* Type specific error codes */ IPSET_ERR_TYPE_SPECIFIC = 4352, @@ -170,6 +174,12 @@ enum ipset_cmd_flags { IPSET_FLAG_MATCH_COUNTERS = (1 << IPSET_FLAG_BIT_MATCH_COUNTERS), IPSET_FLAG_BIT_RETURN_NOMATCH = 7, IPSET_FLAG_RETURN_NOMATCH = (1 << IPSET_FLAG_BIT_RETURN_NOMATCH), + 
IPSET_FLAG_BIT_MAP_SKBMARK = 8, + IPSET_FLAG_MAP_SKBMARK = (1 << IPSET_FLAG_BIT_MAP_SKBMARK), + IPSET_FLAG_BIT_MAP_SKBPRIO = 9, + IPSET_FLAG_MAP_SKBPRIO = (1 << IPSET_FLAG_BIT_MAP_SKBPRIO), + IPSET_FLAG_BIT_MAP_SKBQUEUE = 10, + IPSET_FLAG_MAP_SKBQUEUE = (1 << IPSET_FLAG_BIT_MAP_SKBQUEUE), IPSET_FLAG_CMD_MAX = 15, }; @@ -187,6 +197,8 @@ enum ipset_cadt_flags { IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT), IPSET_FLAG_BIT_WITH_FORCEADD = 5, IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD), + IPSET_FLAG_BIT_WITH_SKBINFO = 6, + IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO), IPSET_FLAG_CADT_MAX = 15, }; diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h index 1ad3659102b6..0880781ad7b6 100644 --- a/include/uapi/linux/netfilter/nf_nat.h +++ b/include/uapi/linux/netfilter/nf_nat.h @@ -13,6 +13,11 @@ #define NF_NAT_RANGE_PROTO_RANDOM_ALL \ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY) + struct nf_nat_ipv4_range { unsigned int flags; __be32 min_ip; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 801bdd1e56e3..f31fe7b660a5 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -51,6 +51,8 @@ enum nft_verdicts { * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes) * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes) * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes) + * @NFT_MSG_NEWGEN: announce a new generation, only for events (enum nft_gen_attributes) + * @NFT_MSG_GETGEN: get the rule-set generation (enum nft_gen_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -68,6 +70,8 @@ enum nf_tables_msg_types { NFT_MSG_NEWSETELEM, NFT_MSG_GETSETELEM, NFT_MSG_DELSETELEM, + NFT_MSG_NEWGEN, + NFT_MSG_GETGEN, NFT_MSG_MAX, }; @@ -571,6 +575,10 @@ enum nft_exthdr_attributes { * @NFT_META_L4PROTO: layer 4 protocol number * @NFT_META_BRI_IIFNAME: packet input bridge interface name * @NFT_META_BRI_OIFNAME: packet output bridge interface name + * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback + * @NFT_META_CPU: cpu id through smp_processor_id() + * @NFT_META_IIFGROUP: packet input interface group + * @NFT_META_OIFGROUP: packet output interface group */ enum nft_meta_keys { NFT_META_LEN, @@ -592,6 +600,10 @@ enum nft_meta_keys { NFT_META_L4PROTO, NFT_META_BRI_IIFNAME, NFT_META_BRI_OIFNAME, + NFT_META_PKTTYPE, + NFT_META_CPU, + NFT_META_IIFGROUP, + NFT_META_OIFGROUP, }; /** @@ -737,13 +749,34 @@ enum nft_queue_attributes { * * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable * @NFT_REJECT_TCP_RST: reject using TCP RST + * @NFT_REJECT_ICMPX_UNREACH: abstracted ICMP unreachable for bridge and inet */ enum nft_reject_types { NFT_REJECT_ICMP_UNREACH, NFT_REJECT_TCP_RST, + NFT_REJECT_ICMPX_UNREACH, }; /** + * enum nft_reject_code - Generic reject codes for IPv4/IPv6 + * + * @NFT_REJECT_ICMPX_NO_ROUTE: no route to host / network unreachable + * @NFT_REJECT_ICMPX_PORT_UNREACH: port unreachable + * @NFT_REJECT_ICMPX_HOST_UNREACH: host unreachable + * @NFT_REJECT_ICMPX_ADMIN_PROHIBITED: administratively prohibited + * + * These codes are mapped to real ICMP and ICMPv6 codes. 
+ */ +enum nft_reject_inet_code { + NFT_REJECT_ICMPX_NO_ROUTE = 0, + NFT_REJECT_ICMPX_PORT_UNREACH, + NFT_REJECT_ICMPX_HOST_UNREACH, + NFT_REJECT_ICMPX_ADMIN_PROHIBITED, + __NFT_REJECT_ICMPX_MAX +}; +#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1) + +/** * enum nft_reject_attributes - nf_tables reject expression netlink attributes * * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types) @@ -777,6 +810,7 @@ enum nft_nat_types { * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_NAT_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) */ enum nft_nat_attributes { NFTA_NAT_UNSPEC, @@ -786,8 +820,33 @@ enum nft_nat_attributes { NFTA_NAT_REG_ADDR_MAX, NFTA_NAT_REG_PROTO_MIN, NFTA_NAT_REG_PROTO_MAX, + NFTA_NAT_FLAGS, __NFTA_NAT_MAX }; #define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) +/** + * enum nft_masq_attributes - nf_tables masquerade expression attributes + * + * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_masq_attributes { + NFTA_MASQ_UNSPEC, + NFTA_MASQ_FLAGS, + __NFTA_MASQ_MAX +}; +#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) + +/** + * enum nft_gen_attributes - nf_tables ruleset generation attributes + * + * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) + */ +enum nft_gen_attributes { + NFTA_GEN_UNSPEC, + NFTA_GEN_ID, + __NFTA_GEN_MAX +}; +#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1) + #endif /* _LINUX_NF_TABLES_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h index 51404ec19022..f3e34dbbf966 100644 --- a/include/uapi/linux/netfilter/nfnetlink_acct.h +++ b/include/uapi/linux/netfilter/nfnetlink_acct.h @@ -28,9 +28,17 @@ enum nfnl_acct_type { NFACCT_USE, NFACCT_FLAGS, NFACCT_QUOTA, + NFACCT_FILTER, __NFACCT_MAX }; #define NFACCT_MAX (__NFACCT_MAX - 1) +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC, + NFACCT_FILTER_MASK, + NFACCT_FILTER_VALUE, + __NFACCT_FILTER_MAX +}; +#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1) #endif /* _UAPI_NFNL_ACCT_H_ */ diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index 964d3d42f874..d6a1df1f2947 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ b/include/uapi/linux/netfilter/xt_set.h @@ -71,4 +71,14 @@ struct xt_set_info_match_v3 { __u32 flags; }; +/* Revision 3 target */ + +struct xt_set_info_target_v3 { + struct xt_set_info add_set; + struct xt_set_info del_set; + struct xt_set_info map_set; + __u32 flags; + __u32 timeout; +}; + #endif /*_XT_SET_H*/ diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h index 250f502902bb..8c2b16a1f5a0 100644 --- a/include/uapi/linux/netfilter_arp/arpt_mangle.h +++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h @@ -13,7 +13,7 @@ struct arpt_mangle union { struct in_addr tgt_ip; } u_t; - u_int8_t flags; + __u8 flags; int target; }; diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index cf47c313794e..584b6ef3a5e8 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -28,7 +28,8 @@ #define NFSEXP_ALLSQUASH 0x0008 #define NFSEXP_ASYNC 0x0010 #define NFSEXP_GATHERED_WRITES 0x0020 -/* 40 80 100 currently unused */ +#define NFSEXP_NOREADDIRPLUS 0x0040 +/* 80 100 currently unused 
*/ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ @@ -47,7 +48,7 @@ */ #define NFSEXP_V4ROOT 0x10000 /* All flags that we claim to support. (Note we don't support NOACL.) */ -#define NFSEXP_ALLFLAGS 0x17E3F +#define NFSEXP_ALLFLAGS 0x1FE7F /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f1db15b9c041..4b28dc07bcb1 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -722,6 +722,22 @@ * QoS mapping is relevant for IP packets, it is only valid during an * association. This is cleared on disassociation and AP restart. * + * @NL80211_CMD_ADD_TX_TS: Ask the kernel to add a traffic stream for the given + * %NL80211_ATTR_TSID and %NL80211_ATTR_MAC with %NL80211_ATTR_USER_PRIO + * and %NL80211_ATTR_ADMITTED_TIME parameters. + * Note that the action frame handshake with the AP shall be handled by + * userspace via the normal management RX/TX framework, this only sets + * up the TX TS in the driver/device. + * If the admitted time attribute is not added then the request just checks + * if a subsequent setup could be successful, the intent is to use this to + * avoid setting up a session with the AP when local restrictions would + * make that impossible. However, the subsequent "real" setup may still + * fail even if the check was successful. + * @NL80211_CMD_DEL_TX_TS: Remove an existing TS with the %NL80211_ATTR_TSID + * and %NL80211_ATTR_MAC parameters. It isn't necessary to call this + * before removing a station entry entirely, or before disassociating + * or similar, cleanup will happen in the driver/device in this case. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -893,6 +909,9 @@ enum nl80211_commands { NL80211_CMD_SET_QOS_MAP, + NL80211_CMD_ADD_TX_TS, + NL80211_CMD_DEL_TX_TS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1594,6 +1613,31 @@ enum nl80211_commands { * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. * + * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection + * shall support Radio Resource Measurements (11k). This attribute can be + * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests. + * User space applications are expected to use this flag only if the + * underlying device supports these minimal RRM features: + * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES, + * %NL80211_FEATURE_QUIET, + * If this flag is used, driver must add the Power Capabilities IE to the + * association request. In addition, it must also set the RRM capability + * flag in the association request's Capability Info field. + * + * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout + * estimation algorithm (dynack). In order to activate dynack + * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower + * drivers to indicate dynack capability. Dynack is automatically disabled + * setting valid value for coverage class. + * + * @NL80211_ATTR_TSID: a TSID value (u8 attribute) + * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute) + * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds + * (per second) (u16 attribute) + * + * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). 
see + * &enum nl80211_smps_mode. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1936,6 +1980,16 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_INITIATOR, + NL80211_ATTR_USE_RRM, + + NL80211_ATTR_WIPHY_DYN_ACK, + + NL80211_ATTR_TSID, + NL80211_ATTR_USER_PRIO, + NL80211_ATTR_ADMITTED_TIME, + + NL80211_ATTR_SMPS_MODE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3055,14 +3109,20 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) + * (if @NL80211_BSS_PRESP_DATA is present then this is known to be + * from a probe response, otherwise it may be from the same beacon + * that the NL80211_BSS_BEACON_TSF will be from) * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the * raw information elements from the probe response/beacon (bin); - * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are - * from a Probe Response frame; otherwise they are from a Beacon frame. + * if the %NL80211_BSS_BEACON_IES attribute is present and the data is + * different then the IEs here are from a Probe Response frame; otherwise + * they are from a Beacon frame. * However, if the driver does not indicate the source of the IEs, these * IEs may be from either frame subtype. + * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the + * data here is known to be from a probe response, without any heuristics. * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon * in mBm (100 * dBm) (s32) * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon @@ -3074,6 +3134,10 @@ enum nl80211_bss_scan_width { * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel * (u32, enum nl80211_bss_scan_width) + * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) + * (not present if no beacon frame has been received yet) + * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and + * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3091,6 +3155,8 @@ enum nl80211_bss { NL80211_BSS_SEEN_MS_AGO, NL80211_BSS_BEACON_IES, NL80211_BSS_CHAN_WIDTH, + NL80211_BSS_BEACON_TSF, + NL80211_BSS_PRESP_DATA, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -3956,6 +4022,26 @@ enum nl80211_ap_sme_features { * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the * lifetime of a BSS. + * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter + * Set IE to probe requests. + * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE + * to probe requests. + * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period + * requests sent to it by an AP. + * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the + * current tx power value into the TPC Report IE in the spectrum + * management TPC Report action frame, and in the Radio Measurement Link + * Measurement Report action frame. + * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout + * estimation (dynack). 
%NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used + * to enable dynack. + * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial + * multiplexing powersave, ie. can turn off all but one chain + * even on HT connections that should be using more chains. + * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial + * multiplexing powersave, ie. can turn off all but one chain + * and then wake the rest up as required after, for example, + * rts/cts handshake. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -3977,6 +4063,13 @@ enum nl80211_feature_flags { NL80211_FEATURE_USERSPACE_MPM = 1 << 16, NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18, + NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19, + NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20, + NL80211_FEATURE_QUIET = 1 << 21, + NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22, + NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, + NL80211_FEATURE_STATIC_SMPS = 1 << 24, + NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, }; /** @@ -4051,6 +4144,25 @@ enum nl80211_acl_policy { }; /** + * enum nl80211_smps_mode - SMPS mode + * + * Requested SMPS mode (for AP mode) + * + * @NL80211_SMPS_OFF: SMPS off (use all antennas). + * @NL80211_SMPS_STATIC: static SMPS (use a single antenna) + * @NL80211_SMPS_DYNAMIC: dynamic smps (start with a single antenna and + * turn on other antennas after CTS/RTS). + */ +enum nl80211_smps_mode { + NL80211_SMPS_OFF, + NL80211_SMPS_STATIC, + NL80211_SMPS_DYNAMIC, + + __NL80211_SMPS_AFTER_LAST, + NL80211_SMPS_MAX = __NL80211_SMPS_AFTER_LAST - 1 +}; + +/** * enum nl80211_radar_event - type of radar event for DFS operation * * Type of event to be used with NL80211_ATTR_RADAR_EVENT to inform userspace diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index a794d1dd7b40..435eabc5ffaa 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -192,6 +192,7 @@ enum ovs_vport_type { OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ OVS_VPORT_TYPE_GRE, /* GRE tunnel. */ OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */ + OVS_VPORT_TYPE_GENEVE, /* Geneve tunnel. */ __OVS_VPORT_TYPE_MAX }; @@ -289,9 +290,12 @@ enum ovs_key_attr { OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */ OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */ + OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash + is not computed by the datapath. */ + OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ #ifdef __KERNEL__ - OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ + OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ #endif __OVS_KEY_ATTR_MAX }; @@ -306,6 +310,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */ OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */ OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ + OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ + OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -493,6 +499,27 @@ struct ovs_action_push_vlan { __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ }; +/* Data path hash algorithm for computing Datapath hash. + * + * The algorithm type only specifies the fields in a flow + * will be used as part of the hash. Each datapath is free + * to use its own hash algorithm. The hash value will be + * opaque to the user space daemon. 
+ */ +enum ovs_hash_alg { + OVS_HASH_ALG_L4, +}; + +/* + * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument. + * @hash_alg: Algorithm used to compute hash prior to recirculation. + * @hash_basis: basis used for computing hash. + */ +struct ovs_action_hash { + uint32_t hash_alg; /* One of ovs_hash_alg. */ + uint32_t hash_basis; +}; + /** * enum ovs_action_attr - Action types. * @@ -521,6 +548,8 @@ enum ovs_action_attr { OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ + OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ + OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ __OVS_ACTION_ATTR_MAX }; diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 30db069bce62..4a1d0cc38ff2 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -552,6 +552,7 @@ #define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ #define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ #define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 32 /* Root Status */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ @@ -630,7 +631,7 @@ /* Advanced Error Reporting */ #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ -#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */ +#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */ #define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */ #define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */ #define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 58afc04c107e..513df75d0fc9 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PRCTL_H #define _LINUX_PRCTL_H +#include <linux/types.h> + /* Values to pass as first argument to prctl() */ #define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ @@ -119,6 +121,31 @@ # define PR_SET_MM_ENV_END 11 # define PR_SET_MM_AUXV 12 # define PR_SET_MM_EXE_FILE 13 +# define PR_SET_MM_MAP 14 +# define PR_SET_MM_MAP_SIZE 15 + +/* + * This structure provides new memory descriptor + * map which mostly modifies /proc/pid/stat[m] + * output for a task. This mostly done in a + * sake of checkpoint/restore functionality. + */ +struct prctl_mm_map { + __u64 start_code; /* code section bounds */ + __u64 end_code; + __u64 start_data; /* data section bounds */ + __u64 end_data; + __u64 start_brk; /* heap for brk() syscall */ + __u64 brk; + __u64 start_stack; /* stack starts at */ + __u64 arg_start; /* command line arguments bounds */ + __u64 arg_end; + __u64 env_start; /* environment variables bounds */ + __u64 env_end; + __u64 *auxv; /* auxiliary vector */ + __u32 auxv_size; /* vector size */ + __u32 exe_fd; /* /proc/$pid/exe link file */ +}; /* * Set specific pid that is allowed to ptrace the current task. 
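As a hedged illustration of how the PR_SET_MM_MAP interface added in the prctl.h hunk above might be driven from user space: the helper name restore_mm_map and the notion of a previously saved descriptor are assumptions for this sketch only, not part of the patch. The operation is only compiled in on kernels with CONFIG_CHECKPOINT_RESTORE and may additionally require privilege (e.g. CAP_SYS_RESOURCE) depending on configuration.

        #include <stdio.h>
        #include <string.h>
        #include <sys/prctl.h>
        #include <linux/prctl.h>   /* struct prctl_mm_map, PR_SET_MM, PR_SET_MM_MAP */

        /* Hypothetical helper: re-establish the memory-map description of the
         * current task from state saved earlier (e.g. by a checkpoint tool).
         * All addresses in *saved are assumed to have been recorded before. */
        static int restore_mm_map(const struct prctl_mm_map *saved)
        {
                struct prctl_mm_map map;

                memcpy(&map, saved, sizeof(map));

                /* PR_SET_MM_MAP is a sub-command of PR_SET_MM and takes the
                 * whole descriptor in one call; arg4 must be the structure
                 * size the kernel expects (PR_SET_MM_MAP_SIZE reports it). */
                if (prctl(PR_SET_MM, PR_SET_MM_MAP,
                          (unsigned long)&map, sizeof(map), 0) != 0) {
                        perror("PR_SET_MM_MAP");
                        return -1;
                }
                return 0;
        }

Passing the whole map atomically is the point of the new sub-command: the older PR_SET_MM_* codes update one field per call, which makes cross-field validation awkward for checkpoint/restore.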
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 5820269aa132..16ad8521af6a 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -244,4 +244,7 @@ /* SC16IS74xx */ #define PORT_SC16IS7XX 108 +/* MESON */ +#define PORT_MESON 109 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/smiapp.h b/include/uapi/linux/smiapp.h new file mode 100644 index 000000000000..53938f4412ee --- /dev/null +++ b/include/uapi/linux/smiapp.h @@ -0,0 +1,29 @@ +/* + * include/uapi/linux/smiapp.h + * + * Generic driver for SMIA/SMIA++ compliant camera modules + * + * Copyright (C) 2014 Intel Corporation + * Contact: Sakari Ailus <sakari.ailus@iki.fi> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#ifndef __UAPI_LINUX_SMIAPP_H_ +#define __UAPI_LINUX_SMIAPP_H_ + +#define V4L2_SMIAPP_TEST_PATTERN_MODE_DISABLED 0 +#define V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR 1 +#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS 2 +#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS_GREY 3 +#define V4L2_SMIAPP_TEST_PATTERN_MODE_PN9 4 + +#endif /* __UAPI_LINUX_SMIAPP_H_ */ diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h index 1e3b09c191cd..aaa86d6bd1dd 100644 --- a/include/uapi/linux/uhid.h +++ b/include/uapi/linux/uhid.h @@ -24,35 +24,23 @@ #include <linux/hid.h> enum uhid_event_type { - UHID_CREATE, + __UHID_LEGACY_CREATE, UHID_DESTROY, UHID_START, UHID_STOP, UHID_OPEN, UHID_CLOSE, UHID_OUTPUT, - UHID_OUTPUT_EV, /* obsolete! 
*/ - UHID_INPUT, - UHID_FEATURE, - UHID_FEATURE_ANSWER, + __UHID_LEGACY_OUTPUT_EV, + __UHID_LEGACY_INPUT, + UHID_GET_REPORT, + UHID_GET_REPORT_REPLY, UHID_CREATE2, UHID_INPUT2, + UHID_SET_REPORT, + UHID_SET_REPORT_REPLY, }; -struct uhid_create_req { - __u8 name[128]; - __u8 phys[64]; - __u8 uniq[64]; - __u8 __user *rd_data; - __u16 rd_size; - - __u16 bus; - __u32 vendor; - __u32 product; - __u32 version; - __u32 country; -} __attribute__((__packed__)); - struct uhid_create2_req { __u8 name[128]; __u8 phys[64]; @@ -66,6 +54,16 @@ struct uhid_create2_req { __u8 rd_data[HID_MAX_DESCRIPTOR_SIZE]; } __attribute__((__packed__)); +enum uhid_dev_flag { + UHID_DEV_NUMBERED_FEATURE_REPORTS = (1ULL << 0), + UHID_DEV_NUMBERED_OUTPUT_REPORTS = (1ULL << 1), + UHID_DEV_NUMBERED_INPUT_REPORTS = (1ULL << 2), +}; + +struct uhid_start_req { + __u64 dev_flags; +}; + #define UHID_DATA_MAX 4096 enum uhid_report_type { @@ -74,36 +72,94 @@ enum uhid_report_type { UHID_INPUT_REPORT, }; -struct uhid_input_req { +struct uhid_input2_req { + __u16 size; + __u8 data[UHID_DATA_MAX]; +} __attribute__((__packed__)); + +struct uhid_output_req { __u8 data[UHID_DATA_MAX]; __u16 size; + __u8 rtype; } __attribute__((__packed__)); -struct uhid_input2_req { +struct uhid_get_report_req { + __u32 id; + __u8 rnum; + __u8 rtype; +} __attribute__((__packed__)); + +struct uhid_get_report_reply_req { + __u32 id; + __u16 err; __u16 size; __u8 data[UHID_DATA_MAX]; } __attribute__((__packed__)); -struct uhid_output_req { +struct uhid_set_report_req { + __u32 id; + __u8 rnum; + __u8 rtype; + __u16 size; + __u8 data[UHID_DATA_MAX]; +} __attribute__((__packed__)); + +struct uhid_set_report_reply_req { + __u32 id; + __u16 err; +} __attribute__((__packed__)); + +/* + * Compat Layer + * All these commands and requests are obsolete. You should avoid using them in + * new code. We support them for backwards-compatibility, but you might not get + * access to new feature in case you use them. + */ + +enum uhid_legacy_event_type { + UHID_CREATE = __UHID_LEGACY_CREATE, + UHID_OUTPUT_EV = __UHID_LEGACY_OUTPUT_EV, + UHID_INPUT = __UHID_LEGACY_INPUT, + UHID_FEATURE = UHID_GET_REPORT, + UHID_FEATURE_ANSWER = UHID_GET_REPORT_REPLY, +}; + +/* Obsolete! Use UHID_CREATE2. */ +struct uhid_create_req { + __u8 name[128]; + __u8 phys[64]; + __u8 uniq[64]; + __u8 __user *rd_data; + __u16 rd_size; + + __u16 bus; + __u32 vendor; + __u32 product; + __u32 version; + __u32 country; +} __attribute__((__packed__)); + +/* Obsolete! Use UHID_INPUT2. */ +struct uhid_input_req { __u8 data[UHID_DATA_MAX]; __u16 size; - __u8 rtype; } __attribute__((__packed__)); -/* Obsolete! Newer kernels will no longer send these events but instead convert - * it into raw output reports via UHID_OUTPUT. */ +/* Obsolete! Kernel uses UHID_OUTPUT exclusively now. */ struct uhid_output_ev_req { __u16 type; __u16 code; __s32 value; } __attribute__((__packed__)); +/* Obsolete! Kernel uses ABI compatible UHID_GET_REPORT. */ struct uhid_feature_req { __u32 id; __u8 rnum; __u8 rtype; } __attribute__((__packed__)); +/* Obsolete! Use ABI compatible UHID_GET_REPORT_REPLY. */ struct uhid_feature_answer_req { __u32 id; __u16 err; @@ -111,6 +167,15 @@ struct uhid_feature_answer_req { __u8 data[UHID_DATA_MAX]; } __attribute__((__packed__)); +/* + * UHID Events + * All UHID events from and to the kernel are encoded as "struct uhid_event". + * The "type" field contains a UHID_* type identifier. All payload depends on + * that type and can be accessed via ev->u.XYZ accordingly. 
+ * If user-space writes short events, they're extended with 0s by the kernel. If + * the kernel writes short events, user-space shall extend them with 0s. + */ + struct uhid_event { __u32 type; @@ -120,9 +185,14 @@ struct uhid_event { struct uhid_output_req output; struct uhid_output_ev_req output_ev; struct uhid_feature_req feature; + struct uhid_get_report_req get_report; struct uhid_feature_answer_req feature_answer; + struct uhid_get_report_reply_req get_report_reply; struct uhid_create2_req create2; struct uhid_input2_req input2; + struct uhid_set_report_req set_report; + struct uhid_set_report_reply_req set_report_reply; + struct uhid_start_req start; } u; } __attribute__((__packed__)); diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h index 0154b2859fd7..295ba299e7bd 100644 --- a/include/uapi/linux/usb/functionfs.h +++ b/include/uapi/linux/usb/functionfs.h @@ -19,6 +19,7 @@ enum functionfs_flags { FUNCTIONFS_HAS_HS_DESC = 2, FUNCTIONFS_HAS_SS_DESC = 4, FUNCTIONFS_HAS_MS_OS_DESC = 8, + FUNCTIONFS_VIRTUAL_ADDR = 16, }; /* Descriptor of an non-audio endpoint */ @@ -32,6 +33,16 @@ struct usb_endpoint_descriptor_no_audio { __u8 bInterval; } __attribute__((packed)); +struct usb_functionfs_descs_head_v2 { + __le32 magic; + __le32 length; + __le32 flags; + /* + * __le32 fs_count, hs_count, fs_count; must be included manually in + * the structure taking flags into consideration. + */ +} __attribute__((packed)); + /* Legacy format, deprecated as of 3.14. */ struct usb_functionfs_descs_head { __le32 magic; @@ -92,7 +103,7 @@ struct usb_ext_prop_desc { * structure. Any flags that are not recognised cause the whole block to be * rejected with -ENOSYS. * - * Legacy descriptors format: + * Legacy descriptors format (deprecated as of 3.14): * * | off | name | type | description | * |-----+-----------+--------------+--------------------------------------| @@ -265,6 +276,12 @@ struct usb_functionfs_event { */ #define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129) +/* + * Returns endpoint descriptor. If function is not active returns -ENODEV. 
+ */ +#define FUNCTIONFS_ENDPOINT_DESC _IOR('g', 130, \ + struct usb_endpoint_descriptor) + #endif /* _UAPI__LINUX_FUNCTIONFS_H__ */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index e946e43fb8d5..661f119a51b8 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -746,6 +746,8 @@ enum v4l2_auto_focus_range { V4L2_AUTO_FOCUS_RANGE_INFINITY = 3, }; +#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32) +#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33) /* FM Modulator class control IDs */ @@ -865,6 +867,10 @@ enum v4l2_jpeg_chroma_subsampling { #define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1) #define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2) #define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3) +#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4) +#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5) +#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6) +#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7) /* Image processing controls */ diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h index 6c8f159e416e..6a0764c89fcb 100644 --- a/include/uapi/linux/v4l2-dv-timings.h +++ b/include/uapi/linux/v4l2-dv-timings.h @@ -21,17 +21,8 @@ #ifndef _V4L2_DV_TIMINGS_H #define _V4L2_DV_TIMINGS_H -#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6)) -/* Sadly gcc versions older than 4.6 have a bug in how they initialize - anonymous unions where they require additional curly brackets. - This violates the C1x standard. This workaround adds the curly brackets - if needed. */ #define V4L2_INIT_BT_TIMINGS(_width, args...) \ { .bt = { _width , ## args } } -#else -#define V4L2_INIT_BT_TIMINGS(_width, args...) \ - .bt = { _width , ## args } -#endif /* CEA-861-E timings (i.e. 
standard HDTV timings) */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 6612974c64bf..29715d27548f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -33,6 +33,9 @@ /* Check if EEH is supported */ #define VFIO_EEH 5 +/* Two-stage IOMMU */ +#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ + /* * The IOCTL interface is designed for extensibility by embedding the * structure length (argsz) and flags into structures passed between diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 778a3298fb34..1c2f84fd4d99 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -79,6 +79,7 @@ /* Four-character-code (FOURCC) */ #define v4l2_fourcc(a, b, c, d)\ ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) +#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31)) /* * E N U M S @@ -307,6 +308,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */ #define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */ +#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */ #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ #define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */ #define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ @@ -1285,11 +1288,11 @@ struct v4l2_ext_control { union { __s32 value; __s64 value64; - char *string; - __u8 *p_u8; - __u16 *p_u16; - __u32 *p_u32; - void *ptr; + char __user *string; + __u8 __user *p_u8; + __u16 __user *p_u16; + __u32 __user *p_u32; + void __user *ptr; }; } __attribute__ ((packed)); diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h new file mode 100644 index 000000000000..6a3cddd156c4 --- /dev/null +++ b/include/uapi/linux/wil6210_uapi.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL6210_UAPI_H__ +#define __WIL6210_UAPI_H__ + +#if !defined(__KERNEL__) +#define __user +#endif + +#include <linux/sockios.h> + +/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1 + * are used by Android devices to implement PNO (preferred network offload). 
+ * Albeit it is temporary solution, use different numbers to avoid conflicts + */ + +/** + * Perform 32-bit I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * struct wil_memio io; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2) + +/** + * Perform block I/O operation to the card memory + * + * User code should arrange data in memory like this: + * + * void *buf; + * struct wil_memio_block io = { + * .block = buf, + * }; + * struct ifreq ifr = { + * .ifr_data = &io, + * }; + */ +#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3) + +/** + * operation to perform + * + * @wil_mmio_op_mask - bits defining operation, + * @wil_mmio_addr_mask - bits defining addressing mode + */ +enum wil_memio_op { + wil_mmio_read = 0, + wil_mmio_write = 1, + wil_mmio_op_mask = 0xff, + wil_mmio_addr_linker = 0 << 8, + wil_mmio_addr_ahb = 1 << 8, + wil_mmio_addr_bar = 2 << 8, + wil_mmio_addr_mask = 0xff00, +}; + +struct wil_memio { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t val; +}; + +struct wil_memio_block { + uint32_t op; /* enum wil_memio_op */ + uint32_t addr; /* should be 32-bit aligned */ + uint32_t size; /* should be multiple of 4 */ + void __user *block; /* block address */ +}; + +#endif /* __WIL6210_UAPI_H__ */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 25e5dd916ba4..02d5125a5ee8 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -328,6 +328,8 @@ enum xfrm_spdattr_type_t { XFRMA_SPD_UNSPEC, XFRMA_SPD_INFO, XFRMA_SPD_HINFO, + XFRMA_SPD_IPV4_HTHRESH, + XFRMA_SPD_IPV6_HTHRESH, __XFRMA_SPD_MAX #define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1) @@ -347,6 +349,11 @@ struct xfrmu_spdhinfo { __u32 spdhmcnt; }; +struct xfrmu_spdhthresh { + __u8 lbits; + __u8 rbits; +}; + struct xfrm_usersa_info { struct xfrm_selector sel; struct xfrm_id id; diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild new file mode 100644 index 000000000000..e96cae7d58c9 --- /dev/null +++ b/include/uapi/misc/Kbuild @@ -0,0 +1,2 @@ +# misc Header export list +header-y += cxl.h diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h new file mode 100644 index 000000000000..cd6d789b73ec --- /dev/null +++ b/include/uapi/misc/cxl.h @@ -0,0 +1,88 @@ +/* + * Copyright 2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _UAPI_MISC_CXL_H +#define _UAPI_MISC_CXL_H + +#include <linux/types.h> +#include <linux/ioctl.h> + + +struct cxl_ioctl_start_work { + __u64 flags; + __u64 work_element_descriptor; + __u64 amr; + __s16 num_interrupts; + __s16 reserved1; + __s32 reserved2; + __u64 reserved3; + __u64 reserved4; + __u64 reserved5; + __u64 reserved6; +}; + +#define CXL_START_WORK_AMR 0x0000000000000001ULL +#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL +#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ + CXL_START_WORK_NUM_IRQS) + +/* ioctl numbers */ +#define CXL_MAGIC 0xCA +#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work) +#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32) + +#define CXL_READ_MIN_SIZE 0x1000 /* 4K */ + +/* Events from read() */ +enum cxl_event_type { + CXL_EVENT_RESERVED = 0, + CXL_EVENT_AFU_INTERRUPT = 1, + CXL_EVENT_DATA_STORAGE = 2, + CXL_EVENT_AFU_ERROR = 3, +}; + +struct cxl_event_header { + __u16 type; + __u16 size; + __u16 process_element; + __u16 reserved1; +}; + +struct cxl_event_afu_interrupt { + __u16 flags; + __u16 irq; /* Raised AFU interrupt number */ + __u32 reserved1; +}; + +struct cxl_event_data_storage { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 addr; + __u64 dsisr; + __u64 reserved3; +}; + +struct cxl_event_afu_error { + __u16 flags; + __u16 reserved1; + __u32 reserved2; + __u64 error; +}; + +struct cxl_event { + struct cxl_event_header header; + union { + struct cxl_event_afu_interrupt irq; + struct cxl_event_data_storage fault; + struct cxl_event_afu_error afu_error; + }; +}; + +#endif /* _UAPI_MISC_CXL_H */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 32168f7ffce3..6ee586728df9 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -219,7 +219,8 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */ #define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ #define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ -#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U16_LE +#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */ +#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 3e43e22cdff9..ef64b66b18df 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -108,6 +108,42 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel); void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num); /* + * IPU Channel Parameter Memory (cpmem) functions + */ +struct ipu_rgb { + struct fb_bitfield red; + struct fb_bitfield green; + struct fb_bitfield blue; + struct fb_bitfield transp; + int bits_per_pixel; +}; + +struct ipu_image { + struct v4l2_pix_format pix; + struct v4l2_rect rect; + dma_addr_t phys; +}; + +void ipu_cpmem_zero(struct ipuv3_channel *ch); +void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres); +void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride); +void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch); +void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf); +void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, 
int stride); +void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize); +int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, + const struct ipu_rgb *rgb); +int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width); +void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format); +void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, + u32 pixel_format, int stride, + int u_offset, int v_offset); +void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, + u32 pixel_format, int stride, int height); +int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc); +int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image); + +/* * IPU Display Controller (dc) functions */ struct ipu_dc; @@ -180,161 +216,9 @@ int ipu_smfc_disable(struct ipu_soc *ipu); int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id); int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize); -#define IPU_CPMEM_WORD(word, ofs, size) ((((word) * 160 + (ofs)) << 8) | (size)) - -#define IPU_FIELD_UBO IPU_CPMEM_WORD(0, 46, 22) -#define IPU_FIELD_VBO IPU_CPMEM_WORD(0, 68, 22) -#define IPU_FIELD_IOX IPU_CPMEM_WORD(0, 90, 4) -#define IPU_FIELD_RDRW IPU_CPMEM_WORD(0, 94, 1) -#define IPU_FIELD_SO IPU_CPMEM_WORD(0, 113, 1) -#define IPU_FIELD_SLY IPU_CPMEM_WORD(1, 102, 14) -#define IPU_FIELD_SLUV IPU_CPMEM_WORD(1, 128, 14) - -#define IPU_FIELD_XV IPU_CPMEM_WORD(0, 0, 10) -#define IPU_FIELD_YV IPU_CPMEM_WORD(0, 10, 9) -#define IPU_FIELD_XB IPU_CPMEM_WORD(0, 19, 13) -#define IPU_FIELD_YB IPU_CPMEM_WORD(0, 32, 12) -#define IPU_FIELD_NSB_B IPU_CPMEM_WORD(0, 44, 1) -#define IPU_FIELD_CF IPU_CPMEM_WORD(0, 45, 1) -#define IPU_FIELD_SX IPU_CPMEM_WORD(0, 46, 12) -#define IPU_FIELD_SY IPU_CPMEM_WORD(0, 58, 11) -#define IPU_FIELD_NS IPU_CPMEM_WORD(0, 69, 10) -#define IPU_FIELD_SDX IPU_CPMEM_WORD(0, 79, 7) -#define IPU_FIELD_SM IPU_CPMEM_WORD(0, 86, 10) -#define IPU_FIELD_SCC IPU_CPMEM_WORD(0, 96, 1) -#define IPU_FIELD_SCE IPU_CPMEM_WORD(0, 97, 1) -#define IPU_FIELD_SDY IPU_CPMEM_WORD(0, 98, 7) -#define IPU_FIELD_SDRX IPU_CPMEM_WORD(0, 105, 1) -#define IPU_FIELD_SDRY IPU_CPMEM_WORD(0, 106, 1) -#define IPU_FIELD_BPP IPU_CPMEM_WORD(0, 107, 3) -#define IPU_FIELD_DEC_SEL IPU_CPMEM_WORD(0, 110, 2) -#define IPU_FIELD_DIM IPU_CPMEM_WORD(0, 112, 1) -#define IPU_FIELD_BNDM IPU_CPMEM_WORD(0, 114, 3) -#define IPU_FIELD_BM IPU_CPMEM_WORD(0, 117, 2) -#define IPU_FIELD_ROT IPU_CPMEM_WORD(0, 119, 1) -#define IPU_FIELD_HF IPU_CPMEM_WORD(0, 120, 1) -#define IPU_FIELD_VF IPU_CPMEM_WORD(0, 121, 1) -#define IPU_FIELD_THE IPU_CPMEM_WORD(0, 122, 1) -#define IPU_FIELD_CAP IPU_CPMEM_WORD(0, 123, 1) -#define IPU_FIELD_CAE IPU_CPMEM_WORD(0, 124, 1) -#define IPU_FIELD_FW IPU_CPMEM_WORD(0, 125, 13) -#define IPU_FIELD_FH IPU_CPMEM_WORD(0, 138, 12) -#define IPU_FIELD_EBA0 IPU_CPMEM_WORD(1, 0, 29) -#define IPU_FIELD_EBA1 IPU_CPMEM_WORD(1, 29, 29) -#define IPU_FIELD_ILO IPU_CPMEM_WORD(1, 58, 20) -#define IPU_FIELD_NPB IPU_CPMEM_WORD(1, 78, 7) -#define IPU_FIELD_PFS IPU_CPMEM_WORD(1, 85, 4) -#define IPU_FIELD_ALU IPU_CPMEM_WORD(1, 89, 1) -#define IPU_FIELD_ALBM IPU_CPMEM_WORD(1, 90, 3) -#define IPU_FIELD_ID IPU_CPMEM_WORD(1, 93, 2) -#define IPU_FIELD_TH IPU_CPMEM_WORD(1, 95, 7) -#define IPU_FIELD_SL IPU_CPMEM_WORD(1, 102, 14) -#define IPU_FIELD_WID0 IPU_CPMEM_WORD(1, 116, 3) -#define IPU_FIELD_WID1 IPU_CPMEM_WORD(1, 119, 3) -#define IPU_FIELD_WID2 IPU_CPMEM_WORD(1, 122, 3) -#define IPU_FIELD_WID3 IPU_CPMEM_WORD(1, 125, 3) -#define IPU_FIELD_OFS0 IPU_CPMEM_WORD(1, 
128, 5) -#define IPU_FIELD_OFS1 IPU_CPMEM_WORD(1, 133, 5) -#define IPU_FIELD_OFS2 IPU_CPMEM_WORD(1, 138, 5) -#define IPU_FIELD_OFS3 IPU_CPMEM_WORD(1, 143, 5) -#define IPU_FIELD_SXYS IPU_CPMEM_WORD(1, 148, 1) -#define IPU_FIELD_CRE IPU_CPMEM_WORD(1, 149, 1) -#define IPU_FIELD_DEC_SEL2 IPU_CPMEM_WORD(1, 150, 1) - -struct ipu_cpmem_word { - u32 data[5]; - u32 res[3]; -}; - -struct ipu_ch_param { - struct ipu_cpmem_word word[2]; -}; - -void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v); -u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs); -struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel); -void ipu_ch_param_dump(struct ipu_ch_param __iomem *p); - -static inline void ipu_ch_param_zero(struct ipu_ch_param __iomem *p) -{ - int i; - void __iomem *base = p; - - for (i = 0; i < sizeof(*p) / sizeof(u32); i++) - writel(0, base + i * sizeof(u32)); -} - -static inline void ipu_cpmem_set_buffer(struct ipu_ch_param __iomem *p, - int bufnum, dma_addr_t buf) -{ - if (bufnum) - ipu_ch_param_write_field(p, IPU_FIELD_EBA1, buf >> 3); - else - ipu_ch_param_write_field(p, IPU_FIELD_EBA0, buf >> 3); -} - -static inline void ipu_cpmem_set_resolution(struct ipu_ch_param __iomem *p, - int xres, int yres) -{ - ipu_ch_param_write_field(p, IPU_FIELD_FW, xres - 1); - ipu_ch_param_write_field(p, IPU_FIELD_FH, yres - 1); -} - -static inline void ipu_cpmem_set_stride(struct ipu_ch_param __iomem *p, - int stride) -{ - ipu_ch_param_write_field(p, IPU_FIELD_SLY, stride - 1); -} - -void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel); - -struct ipu_rgb { - struct fb_bitfield red; - struct fb_bitfield green; - struct fb_bitfield blue; - struct fb_bitfield transp; - int bits_per_pixel; -}; - -struct ipu_image { - struct v4l2_pix_format pix; - struct v4l2_rect rect; - dma_addr_t phys; -}; - -int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p, - int width); - -int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *, - const struct ipu_rgb *rgb); - -static inline void ipu_cpmem_interlaced_scan(struct ipu_ch_param *p, - int stride) -{ - ipu_ch_param_write_field(p, IPU_FIELD_SO, 1); - ipu_ch_param_write_field(p, IPU_FIELD_ILO, stride / 8); - ipu_ch_param_write_field(p, IPU_FIELD_SLY, (stride * 2) - 1); -}; - -void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format, - int stride, int height); -void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p, - u32 pixel_format); -void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p, - u32 pixel_format, int stride, int u_offset, int v_offset); -int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 pixelformat); -int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem, - struct ipu_image *image); - enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); -static inline void ipu_cpmem_set_burstsize(struct ipu_ch_param __iomem *p, - int burstsize) -{ - ipu_ch_param_write_field(p, IPU_FIELD_NPB, burstsize - 1); -}; - struct ipu_client_platformdata { int csi; int di; diff --git a/include/xen/events.h b/include/xen/events.h index 8bee7a75e850..5321cd9636e6 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -28,6 +28,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned long irqflags, const char *devname, void *dev_id); +int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, + unsigned int remote_port); int 
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h index 6f4eae328ca7..f90b03454659 100644 --- a/include/xen/interface/elfnote.h +++ b/include/xen/interface/elfnote.h @@ -3,6 +3,24 @@ * * Definitions used for the Xen ELF notes. * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ @@ -18,12 +36,13 @@ * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. + * + * String values (for non-legacy) are NULL terminated ASCII, also known + * as ASCIZ type. */ /* * NAME=VALUE pair (string). - * - * LEGACY: FEATURES and PAE */ #define XEN_ELFNOTE_INFO 0 @@ -137,10 +156,30 @@ /* * Whether or not the guest supports cooperative suspend cancellation. + * This is a numeric value. + * + * Default is 0 */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* + * The (non-default) location the initial phys-to-machine map should be + * placed at by the hypervisor (Dom0) or the tools (DomU). + * The kernel must be prepared for this mapping to be established using + * large pages, despite such otherwise not being available to guests. + * The kernel must also be able to handle the page table pages used for + * this mapping not being accessible through the initial mapping. + * (Only x86-64 supports this at present.) + */ +#define XEN_ELFNOTE_INIT_P2M 15 + +/* + * Whether or not the guest can deal with being passed an initrd not + * mapped through its initial page tables. + */ +#define XEN_ELFNOTE_MOD_START_PFN 16 + +/* * The features supported by this kernel (numeric). * * Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a @@ -153,6 +192,11 @@ */ #define XEN_ELFNOTE_SUPPORTED_FEATURES 17 +/* + * The number of the highest elfnote defined. 
+ */ +#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES + #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index 131a6ccdba25..14334d0161d5 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h @@ -53,6 +53,9 @@ /* operation as Dom0 is supported */ #define XENFEAT_dom0 11 +/* Xen also maps grant references at pfn = mfn */ +#define XENFEAT_grant_map_identity 12 + #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h new file mode 100644 index 000000000000..d07d7aca8d1c --- /dev/null +++ b/include/xen/interface/io/vscsiif.h @@ -0,0 +1,229 @@ +/****************************************************************************** + * vscsiif.h + * + * Based on the blkif.h code. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright(c) FUJITSU Limited 2008. + */ + +#ifndef __XEN__PUBLIC_IO_SCSI_H__ +#define __XEN__PUBLIC_IO_SCSI_H__ + +#include "ring.h" +#include "../grant_table.h" + +/* + * Feature and Parameter Negotiation + * ================================= + * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to + * communicate capabilities and to negotiate operating parameters. This + * section enumerates these nodes which reside in the respective front and + * backend portions of the XenStore, following the XenBus convention. + * + * Any specified default value is in effect if the corresponding XenBus node + * is not present in the XenStore. + * + * XenStore nodes in sections marked "PRIVATE" are solely for use by the + * driver side whose XenBus tree contains them. + * + ***************************************************************************** + * Backend XenBus Nodes + ***************************************************************************** + * + *------------------ Backend Device Identification (PRIVATE) ------------------ + * + * p-devname + * Values: string + * + * A free string used to identify the physical device (e.g. a disk name). + * + * p-dev + * Values: string + * + * A string specifying the backend device: either a 4-tuple "h:c:t:l" + * (host, controller, target, lun, all integers), or a WWN (e.g. + * "naa.60014054ac780582"). + * + * v-dev + * Values: string + * + * A string specifying the frontend device in form of a 4-tuple "h:c:t:l" + * (host, controller, target, lun, all integers). 
+ * + *--------------------------------- Features --------------------------------- + * + * feature-sg-grant + * Values: unsigned [VSCSIIF_SG_TABLESIZE...65535] + * Default Value: 0 + * + * Specifies the maximum number of scatter/gather elements in grant pages + * supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE + * SG elements specified directly in the request. + * + ***************************************************************************** + * Frontend XenBus Nodes + ***************************************************************************** + * + *----------------------- Request Transport Parameters ----------------------- + * + * event-channel + * Values: unsigned + * + * The identifier of the Xen event channel used to signal activity + * in the ring buffer. + * + * ring-ref + * Values: unsigned + * + * The Xen grant reference granting permission for the backend to map + * the sole page in a single page sized ring buffer. + * + * protocol + * Values: string (XEN_IO_PROTO_ABI_*) + * Default Value: XEN_IO_PROTO_ABI_NATIVE + * + * The machine ABI rules governing the format of all ring request and + * response structures. + */ + +/* Requests from the frontend to the backend */ + +/* + * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd. + * The target is specified via channel, id and lun. + * + * The operation to be performed is specified via a CDB in cmnd[], the length + * of the CDB is in cmd_len. sc_data_direction specifies the direction of data + * (to the device, from the device, or none at all). + * + * If data is to be transferred to or from the device the buffer(s) in the + * guest memory is/are specified via one or multiple scsiif_request_segment + * descriptors each specifying a memory page via a grant_ref_t, a offset into + * the page and the length of the area in that page. All scsiif_request_segment + * areas concatenated form the resulting data buffer used by the operation. + * If the number of scsiif_request_segment areas is not too large (less than + * or equal VSCSIIF_SG_TABLESIZE) the areas can be specified directly in the + * seg[] array and the number of valid scsiif_request_segment elements is to be + * set in nr_segments. + * + * If "feature-sg-grant" in the Xenstore is set it is possible to specify more + * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection. + * The maximum number of allowed scsiif_request_segment elements is the value + * of the "feature-sg-grant" entry from Xenstore. When using indirection the + * seg[] array doesn't contain specifications of the data buffers, but + * references to scsiif_request_segment arrays, which in turn reference the + * data buffers. While nr_segments holds the number of populated seg[] entries + * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment + * elements referencing the target data buffers is calculated from the lengths + * of the seg[] elements (the sum of all valid seg[].length divided by the + * size of one scsiif_request_segment structure). + */ +#define VSCSIIF_ACT_SCSI_CDB 1 + +/* + * Request abort of a running operation for the specified target given by + * channel, id, lun and the operation's rqid in ref_rqid. + */ +#define VSCSIIF_ACT_SCSI_ABORT 2 + +/* + * Request a device reset of the specified target (channel and id). + */ +#define VSCSIIF_ACT_SCSI_RESET 3 + +/* + * Preset scatter/gather elements for a following request. Deprecated. + * Keeping the define only to avoid usage of the value "4" for other actions. 
+ */ +#define VSCSIIF_ACT_SCSI_SG_PRESET 4 + +/* + * Maximum scatter/gather segments per request. + * + * Considering balance between allocating at least 16 "vscsiif_request" + * structures on one page (4096 bytes) and the number of scatter/gather + * elements needed, we decided to use 26 as a magic number. + * + * If "feature-sg-grant" is set, more scatter/gather elements can be specified + * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages. + * In this case the vscsiif_request seg elements don't contain references to + * the user data, but to the SG elements referencing the user data. + */ +#define VSCSIIF_SG_TABLESIZE 26 + +/* + * based on Linux kernel 2.6.18, still valid + * Changing these values requires support of multiple protocols via the rings + * as "old clients" will blindly use these values and the resulting structure + * sizes. + */ +#define VSCSIIF_MAX_COMMAND_SIZE 16 +#define VSCSIIF_SENSE_BUFFERSIZE 96 + +struct scsiif_request_segment { + grant_ref_t gref; + uint16_t offset; + uint16_t length; +}; + +#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment)) + +/* Size of one request is 252 bytes */ +struct vscsiif_request { + uint16_t rqid; /* private guest value, echoed in resp */ + uint8_t act; /* command between backend and frontend */ + uint8_t cmd_len; /* valid CDB bytes */ + + uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */ + uint16_t timeout_per_command; /* deprecated */ + uint16_t channel, id, lun; /* (virtual) device specification */ + uint16_t ref_rqid; /* command abort reference */ + uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) + DMA_FROM_DEVICE(2) + DMA_NONE(3) requests */ + uint8_t nr_segments; /* Number of pieces of scatter-gather */ +/* + * flag in nr_segments: SG elements via grant page + * + * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number + * of grant pages containing SG elements. Usable if "feature-sg-grant" set. + */ +#define VSCSIIF_SG_GRANT 0x80 + + struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; + uint32_t reserved[3]; +}; + +/* Size of one response is 252 bytes */ +struct vscsiif_response { + uint16_t rqid; /* identifies request */ + uint8_t padding; + uint8_t sense_len; + uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; + int32_t rslt; + uint32_t residual_len; /* request bufflen - + return the value from physical device */ + uint32_t reserved[36]; +}; + +DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); + +#endif /*__XEN__PUBLIC_IO_SCSI_H__*/ diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index de082130ba4b..f68719f405af 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -3,6 +3,24 @@ * * Guest OS interface to Xen. * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * * Copyright (c) 2004, K A Fraser */ @@ -73,13 +91,23 @@ * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. + * The latter can be allocated only once per guest: they must initially be + * allocated to VCPU0 but can subsequently be re-bound. */ -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */ -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */ -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ -#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */ +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ +#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */ +#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */ +#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */ +#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 @@ -92,24 +120,68 @@ #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 + /* - * MMU-UPDATE REQUESTS - * - * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. - * A foreigndom (FD) can be specified (or DOMID_SELF for none). - * Where the FD has some effect, it is described below. - * ptr[1:0] specifies the appropriate MMU_* command. + * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[], + * unsigned count, unsigned *done_out, + * unsigned foreigndom) + * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). + * @count is the length of the above array. + * @pdone is an output parameter indicating number of completed operations + * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this + * hypercall invocation. Can be DOMID_SELF. + * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced + * in this hypercall invocation. The value of this field + * (x) encodes the PFD as follows: + * x == 0 => PFD == DOMID_SELF + * x != 0 => PFD == x - 1 * + * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. + * ------------- * ptr[1:0] == MMU_NORMAL_PT_UPDATE: - * Updates an entry in a page table. 
If updating an L1 table, and the new - * table entry is valid/present, the mapped frame must belong to the FD, if - * an FD has been specified. If attempting to map an I/O page then the - * caller assumes the privilege of the FD. + * Updates an entry in a page table belonging to PFD. If updating an L1 table, + * and the new table entry is valid/present, the mapped frame must belong to + * FD. If attempting to map an I/O page then the caller assumes the privilege + * of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * + * There also certain implicit requirements when using this hypercall. The + * pages that make up a pagetable must be mapped read-only in the guest. + * This prevents uncontrolled guest updates to the pagetable. Xen strictly + * enforces this, and will disallow any pagetable update which will end up + * mapping pagetable page RW, and will disallow using any writable page as a + * pagetable. In practice it means that when constructing a page table for a + * process, thread, etc, we MUST be very dilligient in following these rules: + * 1). Start with top-level page (PGD or in Xen language: L4). Fill out + * the entries. + * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD + * or L2). + * 3). Start filling out the PTE table (L1) with the PTE entries. Once + * done, make sure to set each of those entries to RO (so writeable bit + * is unset). Once that has been completed, set the PMD (L2) for this + * PTE table as RO. + * 4). When completed with all of the PMD (L2) entries, and all of them have + * been set to RO, make sure to set RO the PUD (L3). Do the same + * operation on PGD (L4) pagetable entries that have a PUD (L3) entry. + * 5). Now before you can use those pages (so setting the cr3), you MUST also + * pin them so that the hypervisor can verify the entries. This is done + * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame + * number of the PGD (L4)). And this point the HYPERVISOR_mmuext_op( + * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be + * issued. + * For 32-bit guests, the L4 is not used (as there is less pagetables), so + * instead use L3. + * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE + * hypercall. Also if so desired the OS can also try to write to the PTE + * and be trapped by the hypervisor (as the PTE entry is RO). + * + * To deallocate the pages, the operations are the reverse of the steps + * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the + * pagetable MUST not be in use (meaning that the cr3 is not set to it). + * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. @@ -119,6 +191,72 @@ * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. + * + * @val is usually the machine frame number along with some attributes. + * The attributes by default follow the architecture defined bits. 
Meaning that + * if this is a X86_64 machine and four page table layout is used, the layout + * of val is: + * - 63 if set means No execute (NX) + * - 46-13 the machine frame number + * - 12 available for guest + * - 11 available for guest + * - 10 available for guest + * - 9 available for guest + * - 8 global + * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages) + * - 6 dirty + * - 5 accessed + * - 4 page cached disabled + * - 3 page write through + * - 2 userspace accessible + * - 1 writeable + * - 0 present + * + * The one bits that does not fit with the default layout is the PAGE_PSE + * also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the + * HYPERVISOR_mmuext_op serve as mechanism to set a pagetable to be 4MB + * (or 2MB) instead of using the PAGE_PSE bit. + * + * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen + * using it as the Page Attribute Table (PAT) bit - for details on it please + * refer to Intel SDM 10.12. The PAT allows to set the caching attributes of + * pages instead of using MTRRs. + * + * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits): + * PAT4 PAT0 + * +-----+-----+----+----+----+-----+----+----+ + * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux + * +-----+-----+----+----+----+-----+----+----+ + * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots) + * +-----+-----+----+----+----+-----+----+----+ + * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen + * +-----+-----+----+----+----+-----+----+----+ + * + * The lookup of this index table translates to looking up + * Bit 7, Bit 4, and Bit 3 of val entry: + * + * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3). + * + * If all bits are off, then we are using PAT0. If bit 3 turned on, + * then we are using PAT1, if bit 3 and bit 4, then PAT2.. + * + * As you can see, the Linux PAT1 translates to PAT4 under Xen. Which means + * that if a guest that follows Linux's PAT setup and would like to set Write + * Combined on pages it MUST use PAT4 entry. Meaning that Bit 7 (PAGE_PAT) is + * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the + * caching as: + * + * WB = none (so PAT0) + * WC = PWT (bit 3 on) + * UC = PWT | PCD (bit 3 and 4 are on). + * + * To make it work with Xen, it needs to translate the WC bit as so: + * + * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3 + * + * And to translate back it would: + * + * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ @@ -127,7 +265,12 @@ /* * MMU EXTENDED OPERATIONS * - * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. + * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[], + * unsigned int count, + * unsigned int *pdone, + * unsigned int foreigndom) + */ +/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * @@ -164,9 +307,23 @@ * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * + * cmd: MMUEXT_FLUSH_CACHE_GLOBAL + * No additional arguments. Writes back and flushes cache contents + * on all CPUs in the system. + * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. 
+ * + * cmd: MMUEXT_CLEAR_PAGE + * mfn: Machine frame number to be cleared. + * + * cmd: MMUEXT_COPY_PAGE + * mfn: Machine frame number of the destination page. + * src_mfn: Machine frame number of the source page. + * + * cmd: MMUEXT_[UN]MARK_SUPER + * mfn: Machine frame number of head of superpage to be [un]marked. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 @@ -183,12 +340,18 @@ #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 +#define MMUEXT_CLEAR_PAGE 16 +#define MMUEXT_COPY_PAGE 17 +#define MMUEXT_FLUSH_CACHE_GLOBAL 18 +#define MMUEXT_MARK_SUPER 19 +#define MMUEXT_UNMARK_SUPER 20 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { - /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR + * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; @@ -198,6 +361,8 @@ struct mmuext_op { unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ void *vcpumask; + /* COPY_PAGE */ + xen_pfn_t src_mfn; } arg2; }; DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); @@ -225,10 +390,23 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 + +/* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 + +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 + +/* + * x86 guests: support writes to bottom-level PTEs. + * NB1. Page-directory entries cannot be written. + * NB2. Guest must continue to remove all writable mappings of PTEs. + */ #define VMASST_TYPE_writable_pagetables 2 + +/* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 + #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ @@ -260,6 +438,15 @@ typedef uint16_t domid_t; */ #define DOMID_XEN (0x7FF2U) +/* DOMID_COW is used as the owner of sharable pages */ +#define DOMID_COW (0x7FF3U) + +/* DOMID_INVALID is used to identify pages with unknown owner. */ +#define DOMID_INVALID (0x7FF4U) + +/* Idle domain. */ +#define DOMID_IDLE (0x7FFFU) + /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. @@ -272,7 +459,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update); /* * Send an array of these to HYPERVISOR_multicall(). - * NB. The fields are natural register size for this architecture. + * NB. The fields are logically the natural register size for this + * architecture. In cases where xen_ulong_t is larger than this then + * any unused bits in the upper portion must be zero. */ struct multicall_entry { xen_ulong_t op; @@ -442,8 +631,48 @@ struct start_info { unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; + /* The pfn range here covers both page table and p->m table frames. */ + unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ + unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; +/* These flags are passed in the 'flags' field of start_info_t. */ +#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ +#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ +#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ +#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? 
*/ +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ + +/* + * A multiboot module is a package containing modules very similar to a + * multiboot module array. The only differences are: + * - the array of module descriptors is by convention simply at the beginning + * of the multiboot module, + * - addresses in the module descriptors are based on the beginning of the + * multiboot module, + * - the number of modules is determined by a termination descriptor that has + * mod_start == 0. + * + * This permits to both build it statically and reference it in a configuration + * file, and let the PV guest easily rebase the addresses to virtual addresses + * and at the same time count the number of modules. + */ +struct xen_multiboot_mod_list { + /* Address of first byte of the module */ + uint32_t mod_start; + /* Address of last byte of the module (inclusive) */ + uint32_t mod_end; + /* Address of zero-terminated command line */ + uint32_t cmdline; + /* Unused, must be zero */ + uint32_t pad; +}; +/* + * The console structure in start_info.console.dom0 + * + * This structure includes a variety of information required to + * have a working VGA/VESA console. + */ struct dom0_vga_console_info { uint8_t video_type; #define XEN_VGATYPE_TEXT_MODE_3 0x03 @@ -484,11 +713,6 @@ struct dom0_vga_console_info { } u; }; -/* These flags are passed in the 'flags' field of start_info_t. */ -#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ -#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ -#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ - typedef uint64_t cpumap_t; typedef uint8_t xen_domain_handle_t[16]; diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 0324c6d340c1..b78f21caf55a 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -86,6 +86,7 @@ struct xenbus_device_id /* A xenbus driver. */ struct xenbus_driver { + const char *name; /* defaults to ids[0].devicetype */ const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); @@ -100,20 +101,22 @@ struct xenbus_driver { int (*is_ready)(struct xenbus_device *dev); }; -#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \ -struct xenbus_driver var ## _driver = { \ - .driver.name = drvname + 0 ?: var ## _ids->devicetype, \ - .driver.owner = THIS_MODULE, \ - .ids = var ## _ids, ## methods \ -} - static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } -int __must_check xenbus_register_frontend(struct xenbus_driver *); -int __must_check xenbus_register_backend(struct xenbus_driver *); +int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, + struct module *owner, + const char *mod_name); +int __must_check __xenbus_register_backend(struct xenbus_driver *drv, + struct module *owner, + const char *mod_name); + +#define xenbus_register_frontend(drv) \ + __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); +#define xenbus_register_backend(drv) \ + __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); void xenbus_unregister_driver(struct xenbus_driver *drv); |
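To show how the reworked xenbus registration interface is meant to be used, here is a minimal hypothetical frontend module (the "examplefront" names, the "example" device type and the probe body are illustrative only): drivers now fill in struct xenbus_driver directly instead of going through the removed DEFINE_XENBUS_DRIVER() macro, and xenbus_register_frontend() supplies THIS_MODULE and KBUILD_MODNAME via __xenbus_register_frontend().

        #include <linux/module.h>
        #include <xen/xenbus.h>

        static int examplefront_probe(struct xenbus_device *dev,
                                      const struct xenbus_device_id *id)
        {
                dev_info(&dev->dev, "probed %s\n", dev->nodename);
                return 0;
        }

        static const struct xenbus_device_id examplefront_ids[] = {
                { "example" },
                { "" }
        };

        static struct xenbus_driver examplefront_driver = {
                /* .name is optional; it defaults to ids[0].devicetype */
                .ids   = examplefront_ids,
                .probe = examplefront_probe,
        };

        static int __init examplefront_init(void)
        {
                return xenbus_register_frontend(&examplefront_driver);
        }
        module_init(examplefront_init);

        static void __exit examplefront_exit(void)
        {
                xenbus_unregister_driver(&examplefront_driver);
        }
        module_exit(examplefront_exit);

        MODULE_LICENSE("GPL");

Compared with the old macro, this keeps the driver structure visible at the definition site and lets the registration helper record the owning module and module name itself, which is what the added owner/mod_name parameters are for.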