Diffstat (limited to 'include')
384 files changed, 11760 insertions, 5813 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 9ed8b987185b..3f38eb03649c 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -223,6 +223,7 @@ static inline void atomic_dec(atomic_t *v) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) +#ifndef __atomic_add_unless static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; @@ -231,5 +232,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) c = old; return c; } +#endif #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h new file mode 100644 index 000000000000..57af9f21d148 --- /dev/null +++ b/include/asm-generic/kprobes.h @@ -0,0 +1,25 @@ +#ifndef _ASM_GENERIC_KPROBES_H +#define _ASM_GENERIC_KPROBES_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +# define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((__section__("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +/* Use this to forbid a kprobes attach on very low level functions */ +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define NOKPROBE_SYMBOL(fname) +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ + +#endif /* _ASM_GENERIC_KPROBES_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 18af2bcefe6a..f4ca23b158b3 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -36,6 +36,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); #else static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, @@ -44,6 +47,13 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, BUILD_BUG(); return 0; } +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -121,8 +131,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) @@ -131,20 +141,40 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, pmd_clear(pmdp); return pmd; } +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#endif -#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL #ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef 
__HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int full) { return pmdp_huge_get_and_clear(mm, address, pmdp); } -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, @@ -181,6 +211,9 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma, extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pud_t *pudp); #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -192,6 +225,30 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, @@ -208,6 +265,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -273,12 +347,23 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { BUILD_BUG(); return 0; } + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + BUILD_BUG(); + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -640,6 +725,15 @@ static inline int pmd_write(pmd_t pmd) #endif /* __HAVE_ARCH_PMD_WRITE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + #ifndef pmd_read_atomic static inline pmd_t pmd_read_atomic(pmd_t *pmdp) { @@ -785,8 +879,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) * e.g. 
see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #else #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() #endif #endif diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 7eed8cf3130a..4329bc6ef04b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -232,6 +232,20 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) +/** + * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb + * invalidation. This is a nop so far, because only x86 needs it. + */ +#ifndef __tlb_remove_pud_tlb_entry +#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0) +#endif + +#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ + do { \ + __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ + } while (0) + /* * For things like page tables caches (ie caching addresses "inside" the * page tables, like x86 does), for legacy reasons, flushing an diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 404e9558e879..436c4c2683c7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -191,9 +191,25 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue) return queue->qlen; } -/* These functions require the input/output to be aligned as u32. */ void crypto_inc(u8 *a, unsigned int size); -void crypto_xor(u8 *dst, const u8 *src, unsigned int size); +void __crypto_xor(u8 *dst, const u8 *src, unsigned int size); + +static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) +{ + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && + __builtin_constant_p(size) && + (size % sizeof(unsigned long)) == 0) { + unsigned long *d = (unsigned long *)dst; + unsigned long *s = (unsigned long *)src; + + while (size > 0) { + *d++ ^= *s++; + size -= sizeof(unsigned long); + } + } else { + __crypto_xor(dst, src, size); + } +} int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err); @@ -344,13 +360,18 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, return crypto_attr_alg(tb[1], type, mask); } +static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +{ + return (type ^ off) & mask & off; +} + /* * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. * Otherwise returns zero. 
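Editor's note: the algapi.h hunk above turns crypto_xor() into an inline that takes a word-at-a-time fast path when the length is a compile-time-constant multiple of sizeof(unsigned long), falling back to the out-of-line __crypto_xor() otherwise. Below is a minimal standalone userspace sketch of that fast path, checked against a byte-wise reference; the xor_bytes/xor_words names and the test harness are illustrative, not kernel code.

```c
#include <stdio.h>
#include <string.h>

/* Byte-wise fallback, analogous to the out-of-line __crypto_xor(). */
static void xor_bytes(unsigned char *dst, const unsigned char *src, size_t n)
{
	while (n--)
		*dst++ ^= *src++;
}

/* Word-at-a-time path: what the new inline takes when the length is a
 * constant multiple of sizeof(unsigned long) and unaligned word accesses
 * are cheap (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS in the kernel). */
static void xor_words(void *dst, const void *src, size_t n)
{
	unsigned long *d = dst;
	const unsigned long *s = src;

	while (n > 0) {
		*d++ ^= *s++;
		n -= sizeof(unsigned long);
	}
}

int main(void)
{
	_Alignas(unsigned long) unsigned char a[16], b[16], ref[16];
	size_t i;

	for (i = 0; i < sizeof(a); i++) {
		a[i] = (unsigned char)(7 * i + 1);
		b[i] = (unsigned char)(0xa5 ^ i);
	}
	memcpy(ref, a, sizeof(ref));

	xor_bytes(ref, b, sizeof(ref));		/* reference result */
	xor_words(a, b, sizeof(a));		/* fast path under test */

	printf("fast path %s the byte-wise result\n",
	       memcmp(a, ref, sizeof(a)) ? "differs from" : "matches");
	return 0;
}
```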
*/ static inline int crypto_requires_sync(u32 type, u32 mask) { - return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; + return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index 20d20f681a72..445fc45f4b5b 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h @@ -5,6 +5,7 @@ #ifndef _CRYPTO_CHACHA20_H #define _CRYPTO_CHACHA20_H +#include <crypto/skcipher.h> #include <linux/types.h> #include <linux/crypto.h> @@ -18,9 +19,8 @@ struct chacha20_ctx { void chacha20_block(u32 *state, void *stream); void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); -int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); -int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes); +int crypto_chacha20_crypt(struct skcipher_request *req); #endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 216a2b876147..b5727bcd2336 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -329,6 +329,16 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) return crypto_hash_alg_common(tfm)->digestsize; } +/** + * crypto_ahash_statesize() - obtain size of the ahash state + * @tfm: cipher handle + * + * Return the size of the ahash state. With the crypto_ahash_export() + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) { return crypto_hash_alg_common(tfm)->statesize; @@ -369,11 +379,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm( * crypto_ahash_reqsize() - obtain size of the request data structure * @tfm: cipher handle * - * Return the size of the ahash state size. With the crypto_ahash_export - * function, the caller can export the state into a buffer whose size is - * defined with this function. - * - * Return: size of the ahash state + * Return: size of the request data */ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { @@ -453,7 +459,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * This function exports the hash state of the ahash_request handle into the * caller-allocated output buffer out which must have sufficient size (e.g. by - * calling crypto_ahash_reqsize). + * calling crypto_ahash_statesize()). * * Return: 0 if the export was successful; < 0 if an error occurred */ diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 8735979ed341..e42f7063f245 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -66,7 +66,7 @@ struct skcipher_walk { int flags; unsigned int blocksize; - unsigned int chunksize; + unsigned int stride; unsigned int alignmask; }; diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 750b14f1ada4..562001cb412b 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -115,6 +115,9 @@ struct crypto_skcipher { * IV of exactly that size to perform the encrypt or decrypt operation. * @chunksize: Equal to the block size except for stream ciphers such as * CTR where it is set to the underlying block size. 
+ * @walksize: Equal to the chunk size except in cases where the algorithm is + * considerably more efficient if it can operate on multiple chunks + * in parallel. Should be a multiple of chunksize. * @base: Definition of a generic crypto algorithm. * * All fields except @ivsize are mandatory and must be filled. @@ -131,6 +134,7 @@ struct skcipher_alg { unsigned int max_keysize; unsigned int ivsize; unsigned int chunksize; + unsigned int walksize; struct crypto_alg base; }; @@ -289,6 +293,19 @@ static inline unsigned int crypto_skcipher_alg_chunksize( return alg->chunksize; } +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->walksize; +} + /** * crypto_skcipher_chunksize() - obtain chunk size * @tfm: cipher handle @@ -307,6 +324,23 @@ static inline unsigned int crypto_skcipher_chunksize( } /** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + +/** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index bae79f3c4d28..b080a171a23f 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -27,6 +27,16 @@ enum dw_hdmi_devtype { RK3288_HDMI, }; +enum dw_hdmi_phy_type { + DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00, + DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2, + DW_HDMI_PHY_DWC_MHL_PHY = 0xc2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC = 0xe2, + DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY = 0xf2, + DW_HDMI_PHY_DWC_HDMI20_TX_PHY = 0xf3, + DW_HDMI_PHY_VENDOR_PHY = 0xfe, +}; + struct dw_hdmi_mpll_config { unsigned long mpixelclock; struct { @@ -56,10 +66,11 @@ struct dw_hdmi_plat_data { struct drm_display_mode *mode); }; -void dw_hdmi_unbind(struct device *dev, struct device *master, void *data); -int dw_hdmi_bind(struct device *dev, struct device *master, - void *data, struct drm_encoder *encoder, - struct resource *iores, int irq, +int dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_remove(struct platform_device *pdev); +void dw_hdmi_unbind(struct device *dev); +int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, const struct dw_hdmi_plat_data *plat_data); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h index 3629b2734db6..fbdfc8d7f3c7 100644 --- a/include/drm/bridge/mhl.h +++ b/include/drm/bridge/mhl.h @@ -15,6 +15,8 @@ #ifndef __MHL_H__ #define __MHL_H__ +#include <linux/types.h> + /* Device Capabilities Registers */ enum { MHL_DCAP_DEV_STATE, @@ -288,4 +290,87 @@ enum { /* Unsupported/unrecognized key code */ #define MHL_UCPE_STATUS_INEFFECTIVE_KEY_CODE 0x01 +enum mhl_burst_id { + MHL_BURST_ID_3D_VIC = 0x10, + MHL_BURST_ID_3D_DTD = 0x11, + MHL_BURST_ID_HEV_VIC = 0x20, + MHL_BURST_ID_HEV_DTDA = 0x21, + MHL_BURST_ID_HEV_DTDB = 0x22, + 
MHL_BURST_ID_VC_ASSIGN = 0x38, + MHL_BURST_ID_VC_CONFIRM = 0x39, + MHL_BURST_ID_AUD_DELAY = 0x40, + MHL_BURST_ID_ADT_BURSTID = 0x41, + MHL_BURST_ID_BIST_SETUP = 0x51, + MHL_BURST_ID_BIST_RETURN_STAT = 0x52, + MHL_BURST_ID_EMSC_SUPPORT = 0x61, + MHL_BURST_ID_HID_PAYLOAD = 0x62, + MHL_BURST_ID_BLK_RCV_BUFFER_INFO = 0x63, + MHL_BURST_ID_BITS_PER_PIXEL_FMT = 0x64, +}; + +struct mhl_burst_blk_rcv_buffer_info { + __be16 id; + __le16 size; +} __packed; + +struct mhl3_burst_header { + __be16 id; + u8 checksum; + u8 total_entries; + u8 sequence_index; +} __packed; + +struct mhl_burst_bits_per_pixel_fmt { + struct mhl3_burst_header hdr; + u8 num_entries; + struct { + u8 stream_id; + u8 pixel_format; + } __packed desc[0]; +} __packed; + +struct mhl_burst_emsc_support { + struct mhl3_burst_header hdr; + u8 num_entries; + __be16 burst_id[0]; +} __packed; + +struct mhl_burst_audio_descr { + struct mhl3_burst_header hdr; + u8 flags; + u8 short_desc[9]; +} __packed; + +/* + * MHL3 infoframe related definitions + */ + +#define MHL3_IEEE_OUI 0x7ca61d +#define MHL3_INFOFRAME_SIZE 15 + +enum mhl3_video_format { + MHL3_VIDEO_FORMAT_NONE, + MHL3_VIDEO_FORMAT_3D, + MHL3_VIDEO_FORMAT_MULTI_VIEW, + MHL3_VIDEO_FORMAT_DUAL_3D +}; + +enum mhl3_3d_format_type { + MHL3_3D_FORMAT_TYPE_FS, /* frame sequential */ + MHL3_3D_FORMAT_TYPE_TB, /* top-bottom */ + MHL3_3D_FORMAT_TYPE_LR, /* left-right */ + MHL3_3D_FORMAT_TYPE_FS_TB, /* frame sequential, top-bottom */ + MHL3_3D_FORMAT_TYPE_FS_LR, /* frame sequential, left-right */ + MHL3_3D_FORMAT_TYPE_TB_LR /* top-bottom, left-right */ +}; + +struct mhl3_infoframe { + unsigned char version; + enum mhl3_video_format video_format; + enum mhl3_3d_format_type format_type; + bool sep_audio; + int hev_format; + int av_delay; +}; + #endif /* __MHL_H__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 9c4ee144b5f6..6105c050d7bc 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -360,6 +360,7 @@ struct drm_ioctl_desc { /* Event queued up for userspace to read */ struct drm_pending_event { struct completion *completion; + void (*completion_release)(struct completion *completion); struct drm_event *event; struct dma_fence *fence; struct list_head link; @@ -635,6 +636,19 @@ struct drm_device { int switch_power_state; }; +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * have atomic modesetting internally implemented. + */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return dev->mode_config.funcs->atomic_commit != NULL; +} + #include <drm/drm_irq.h> #define DRM_SWITCH_POWER_ON 0 @@ -718,11 +732,6 @@ int drm_noop(struct drm_device *dev, void *data, int drm_invalid_op(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* Cache management (drm_cache.c) */ -void drm_clflush_pages(struct page *pages[], unsigned long num_pages); -void drm_clflush_sg(struct sg_table *st); -void drm_clflush_virt_range(void *addr, unsigned long length); - /* * These are exported to drivers so that they can implement fencing using * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
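Editor's note: the drmP.h hunk above adds drm_drv_uses_atomic_modeset(), whose kernel-doc says it is for drivers that implement atomic commit internally without advertising DRIVER_ATOMIC. A hedged sketch of how such a check might be used; foo_debugfs_init is a hypothetical driver function.

```c
#include <drm/drmP.h>

/* Hypothetical debugfs setup: register atomic-state dumping only when the
 * driver actually wires up atomic_commit, whether or not it exposes
 * DRIVER_ATOMIC to userspace. */
static void foo_debugfs_init(struct drm_device *dev)
{
	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	/* ... register atomic-only debugfs files here ... */
}
```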
@@ -782,21 +791,6 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev); /*@}*/ -/* PCI section */ -static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) -{ - if (dev->driver->device_is_agp != NULL) { - int err = (*dev->driver->device_is_agp) (dev); - - if (err != 2) { - return err; - } - } - - return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); -} -void drm_pci_agp_destroy(struct drm_device *dev); - extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); #ifdef CONFIG_PCI diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 56814e8ae7ea..052ab161b239 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -123,7 +123,8 @@ struct drm_crtc_commit { /** * @commit_entry: * - * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock. + * Entry on the per-CRTC &drm_crtc.commit_list. Protected by + * $drm_crtc.commit_lock. */ struct list_head commit_entry; @@ -145,6 +146,7 @@ struct __drm_crtcs_state { struct drm_crtc_state *state; struct drm_crtc_commit *commit; s32 __user *out_fence_ptr; + unsigned last_vblank_count; }; struct __drm_connnectors_state { @@ -188,12 +190,31 @@ struct drm_atomic_state { struct work_struct commit_work; }; -void drm_crtc_commit_put(struct drm_crtc_commit *commit); +void __drm_crtc_commit_free(struct kref *kref); + +/** + * drm_crtc_commit_get - acquire a reference to the CRTC commit + * @commit: CRTC commit + * + * Increases the reference of @commit. + */ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit) { kref_get(&commit->ref); } +/** + * drm_crtc_commit_put - release a reference to the CRTC commmit + * @commit: CRTC commit + * + * This releases a reference to @commit which is freed after removing the + * final reference. No locking required and callable from any context. + */ +static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, __drm_crtc_commit_free); +} + struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); @@ -369,12 +390,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); void drm_state_dump(struct drm_device *dev, struct drm_printer *p); -#ifdef CONFIG_DEBUG_FS -struct drm_minor; -int drm_atomic_debugfs_init(struct drm_minor *minor); -int drm_atomic_debugfs_cleanup(struct drm_minor *minor); -#endif - #define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector && \ @@ -403,7 +418,7 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor); * drm_atomic_crtc_needs_modeset - compute combined modeset need * @state: &drm_crtc_state for the CRTC * - * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track + * To give drivers flexibility &struct drm_crtc_state has 3 booleans to track * whether the state CRTC changed enough to need a full modeset cycle: * connectors_changed, mode_changed and active_changed. This helper simply * combines these three to compute the overall need for a modeset for @state. @@ -415,7 +430,8 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor); * * For example if the CRTC mode has changed, and the hardware is able to enact * the requested mode change without going through a full modeset, the driver - * should clear mode_changed during its ->atomic_check. 
+ * should clear mode_changed in its &drm_mode_config_funcs.atomic_check + * implementation. */ static inline bool drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) @@ -424,5 +440,4 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) state->connectors_changed; } - #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 7ff92b09fd9c..d066e9491ae3 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -48,9 +48,6 @@ int drm_atomic_helper_commit(struct drm_device *dev, int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap); -bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, - struct drm_atomic_state *old_state, - struct drm_crtc *crtc); void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); @@ -124,6 +121,12 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags); +int drm_atomic_helper_page_flip_target( + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags, + uint32_t target); int drm_atomic_helper_connector_dpms(struct drm_connector *connector, int mode); struct drm_encoder * @@ -174,7 +177,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * * This iterates over the current state, useful (for example) when applying * atomic state after it has been checked and swapped. To iterate over the - * planes which *will* be attached (for ->atomic_check()) see + * planes which *will* be attached (more useful in code called from + * &drm_mode_config_funcs.atomic_check) see * drm_atomic_crtc_state_for_each_plane(). */ #define drm_atomic_crtc_for_each_plane(plane, crtc) \ @@ -186,8 +190,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during (for example) - * ->atomic_check() operations, to validate the incoming state. + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. */ #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) @@ -199,8 +204,9 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * @crtc_state: the incoming crtc-state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be - * attached if the specified state is applied. Useful during (for example) - * ->atomic_check() operations, to validate the incoming state. + * attached if the specified state is applied. Useful during for example + * in code called from &drm_mode_config_funcs.atomic_check operations, to + * validate the incoming state. * * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a * const plane_state. This is useful when a driver just wants to peek at other diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 610223b0481b..1eb4a52cad8d 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,10 +33,7 @@ * * @refcount: Refcount for this master object. * @dev: Link back to the DRM device - * @unique: Unique identifier: e.g. busid. 
Protected by drm_global_mutex. - * @unique_len: Length of unique field. Protected by drm_global_mutex. - * @magic_map: Map of used authentication tokens. Protected by struct_mutex. - * @lock: DRI lock information. + * @lock: DRI1 lock information. * @driver_priv: Pointer to driver-private information. * * Note that master structures are only relevant for the legacy/primary device @@ -45,8 +42,20 @@ struct drm_master { struct kref refcount; struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by + * &drm_device.master_mutex. + */ char *unique; + /** + * @unique_len: Length of unique field. Protected by + * &drm_device.master_mutex. + */ int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by + * &drm_device.master_mutex. + */ struct idr magic_map; struct drm_lock_data lock; void *driver_priv; diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 530a1d6e8cde..fdd82fcbf168 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -96,9 +96,10 @@ struct drm_bridge_funcs { * This callback should disable the bridge. It is called right before * the preceding element in the display pipe is disabled. If the * preceding element is a bridge this means it's called before that - * bridge's ->disable() function. If the preceding element is a - * &drm_encoder it's called right before the encoder's ->disable(), - * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @disable vfunc. If the preceding element is a &drm_encoder + * it's called right before the &drm_encoder_helper_funcs.disable, + * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms + * hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. @@ -110,12 +111,13 @@ struct drm_bridge_funcs { /** * @post_disable: * - * This callback should disable the bridge. It is called right after - * the preceding element in the display pipe is disabled. If the - * preceding element is a bridge this means it's called after that - * bridge's ->post_disable() function. If the preceding element is a - * &drm_encoder it's called right after the encoder's ->disable(), - * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @post_disable function. If the preceding element is a &drm_encoder + * it's called right after the encoder's + * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare + * or &drm_encoder_helper_funcs.dpms hook. * * The bridge must assume that the display pipe (i.e. clocks and timing * singals) feeding it is no longer running when this callback is @@ -129,9 +131,11 @@ struct drm_bridge_funcs { * @mode_set: * * This callback should set the given mode on the bridge. It is called - * after the ->mode_set() callback for the preceding element in the - * display pipeline has been called already. The display pipe (i.e. - * clocks and timing signals) is off when this function is called. + * after the @mode_set callback for the preceding element in the display + * pipeline has been called already. If the bridge is the first element + * then this would be &drm_encoder_helper_funcs.mode_set. The display + * pipe (i.e. clocks and timing signals) is off when this function is + * called. 
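Editor's note: the drm_bridge_funcs kernel-doc being reworded in this hunk pins down when each hook runs relative to the preceding pipeline element. A skeletal bridge implementation restating that ordering in comments; all foo_* names are hypothetical and the bodies are placeholders, not a definitive implementation.

```c
#include <drm/drm_bridge.h>

static void foo_bridge_pre_enable(struct drm_bridge *bridge)
{
	/* Runs before the preceding element (encoder or earlier bridge) is
	 * enabled: the pipe feeding us is not yet running, so only prepare
	 * the hardware here (power, PLLs). */
}

static void foo_bridge_enable(struct drm_bridge *bridge)
{
	/* Runs after the preceding element is enabled: clocks and timing
	 * signals are up, start driving the sink. */
}

static void foo_bridge_disable(struct drm_bridge *bridge)
{
	/* Runs before the preceding element is disabled: the pipe is still
	 * running, stop driving the sink. */
}

static void foo_bridge_post_disable(struct drm_bridge *bridge)
{
	/* Runs after the preceding element is disabled: the pipe is no
	 * longer running, safe to power down. */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.pre_enable	= foo_bridge_pre_enable,
	.enable		= foo_bridge_enable,
	.disable	= foo_bridge_disable,
	.post_disable	= foo_bridge_post_disable,
};
```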
*/ void (*mode_set)(struct drm_bridge *bridge, struct drm_display_mode *mode, @@ -142,9 +146,10 @@ struct drm_bridge_funcs { * This callback should enable the bridge. It is called right before * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called before that - * bridge's ->pre_enable() function. If the preceding element is a - * &drm_encoder it's called right before the encoder's ->enable(), - * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @pre_enable function. If the preceding element is a + * &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * * The display pipe (i.e. clocks and timing signals) feeding this bridge * will not yet be running when this callback is called. The bridge must @@ -161,9 +166,10 @@ struct drm_bridge_funcs { * This callback should enable the bridge. It is called right after * the preceding element in the display pipe is enabled. If the * preceding element is a bridge this means it's called after that - * bridge's ->enable() function. If the preceding element is a - * &drm_encoder it's called right after the encoder's ->enable(), - * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * bridge's @enable function. If the preceding element is a + * &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This @@ -201,8 +207,8 @@ struct drm_bridge { int drm_bridge_add(struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); struct drm_bridge *of_drm_find_bridge(struct device_node *np); -int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); -void drm_bridge_detach(struct drm_bridge *bridge); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous); bool drm_bridge_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cebecff536a3..beab0f0d0cfb 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -33,7 +33,11 @@ #ifndef _DRM_CACHE_H_ #define _DRM_CACHE_H_ +#include <linux/scatterlist.h> + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +void drm_clflush_sg(struct sg_table *st); +void drm_clflush_virt_range(void *addr, unsigned long length); static inline bool drm_arch_can_wc_memory(void) { diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index c767238ac9d5..bce4a532836d 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -25,6 +25,8 @@ #include <linux/ctype.h> +uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision); + void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, uint degamma_lut_size, bool has_ctm, @@ -33,29 +35,4 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size); -/** - * drm_color_lut_extract - clamp&round LUT entries - * @user_input: input value - * @bit_precision: number of bits the hw LUT supports - * - * Extract a degamma/gamma LUT value provided by user (in the form of - * &drm_color_lut entries) and round it to the 
precision supported by the - * hardware. - */ -static inline uint32_t drm_color_lut_extract(uint32_t user_input, - uint32_t bit_precision) -{ - uint32_t val = user_input; - uint32_t max = 0xffff >> (16 - bit_precision); - - /* Round only if we're not using full precision. */ - if (bit_precision < 16) { - val += 1UL << (16 - bit_precision - 1); - val >>= 16 - bit_precision; - } - - return clamp_val(val, 0, max); -} - - #endif diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 045a97cbeba2..e5e1eddd19fb 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -94,7 +94,7 @@ enum subpixel_order { * * Describes a given display (e.g. CRT or flat panel) and its limitations. For * fixed display sinks like built-in panels there's not much difference between - * this and struct &drm_connector. But for sinks with a real cable this + * this and &struct drm_connector. But for sinks with a real cable this * structure is meant to describe all the things at the other end of the cable. * * For sinks which provide an EDID this can be filled out by calling @@ -117,7 +117,7 @@ struct drm_display_info { /** * @pixel_clock: Maximum pixel clock supported by the sink, in units of - * 100Hz. This mismatches the clok in &drm_display_mode (which is in + * 100Hz. This mismatches the clock in &drm_display_mode (which is in * kHZ), because that's what the EDID uses as base unit. */ unsigned int pixel_clock; @@ -331,15 +331,15 @@ struct drm_connector_funcs { * * Entry point for output detection and basic mode validation. The * driver should reprobe the output if needed (e.g. when hotplug - * handling is unreliable), add all detected modes to connector->modes + * handling is unreliable), add all detected modes to &drm_connector.modes * and filter out any the device can't support in any configuration. It * also needs to filter out any modes wider or higher than the * parameters max_width and max_height indicate. * * The drivers must also prune any modes no longer valid from - * connector->modes. Furthermore it must update connector->status and - * connector->edid. If no EDID has been received for this output - * connector->edid must be NULL. + * &drm_connector.modes. Furthermore it must update + * &drm_connector.status and &drm_connector.edid. If no EDID has been + * received for this output connector->edid must be NULL. * * Drivers using the probe helpers should use * drm_helper_probe_single_connector_modes() or @@ -348,7 +348,7 @@ struct drm_connector_funcs { * * RETURNS: * - * The number of modes detected and filled into connector->modes. + * The number of modes detected and filled into &drm_connector.modes. */ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); @@ -381,7 +381,7 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * - * This is called while holding drm_connector->mutex. + * This is called while holding &drm_connector.mutex. * * Returns: * @@ -398,7 +398,7 @@ struct drm_connector_funcs { * early in the driver unload sequence to disable userspace access * before data structures are torndown. * - * This is called while holding drm_connector->mutex. + * This is called while holding &drm_connector.mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -418,17 +418,17 @@ struct drm_connector_funcs { * Duplicate the current atomic state for this connector and return it. 
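Editor's note: the drm_color_mgmt.h hunk above moves drm_color_lut_extract() out of the header, and the removed inline shows the full round-then-clamp logic. A standalone userspace replica with two worked values, useful for seeing why the clamp is needed after rounding; clamp_val is replaced by an explicit ternary.

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace replica of the (removed) drm_color_lut_extract() inline:
 * round a 16-bit LUT entry to the hardware precision, then clamp. */
static uint32_t lut_extract(uint32_t user_input, uint32_t bit_precision)
{
	uint32_t val = user_input;
	uint32_t max = 0xffff >> (16 - bit_precision);

	/* Round only if we're not using full precision. */
	if (bit_precision < 16) {
		val += 1UL << (16 - bit_precision - 1);
		val >>= 16 - bit_precision;
	}

	return val > max ? max : val;
}

int main(void)
{
	/* 0xffff rounds up past the 10-bit range and relies on the clamp:
	 * 0xffff + 0x20 = 0x1001f, >> 6 = 0x400, clamped to 0x3ff. */
	printf("0xffff @ 10 bit -> 0x%x\n", lut_extract(0xffff, 10));
	printf("0x8000 @  8 bit -> 0x%x\n", lut_extract(0x8000, 8));
	return 0;
}
```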
* The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling ->atomic_commit() from struct - * &drm_mode_config_funcs) will be cleaned up by calling the - * @atomic_destroy_state hook in this structure. + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. * - * Atomic drivers which don't subclass struct &drm_connector_state should use + * Atomic drivers which don't subclass &struct drm_connector_state should use * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before connector->state has been + * It is an error to call this hook before &drm_connector.state has been * initialized correctly. * * NOTE: @@ -525,7 +525,7 @@ struct drm_connector_funcs { /** * @atomic_print_state: * - * If driver subclasses struct &drm_connector_state, it should implement + * If driver subclasses &struct drm_connector_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_connector_print_state() @@ -563,9 +563,6 @@ struct drm_cmdline_mode { * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? * @stereo_allowed: can this connector handle stereo modes? - * @modes: modes available on this connector (from fill_modes() + user) - * @status: one of the drm_connector_status enums (connected, not, or unknown) - * @probed_modes: list of modes derived directly from the display * @funcs: connector control functions * @edid_blob_ptr: DRM property containing EDID if present * @properties: property tracking for this connector @@ -612,8 +609,8 @@ struct drm_connector { /** * @mutex: Lock for general connector state, but currently only protects - * @registered. Most of the connector state is still protected by the - * mutex in &drm_mode_config. + * @registered. Most of the connector state is still protected by + * &drm_mode_config.mutex. */ struct mutex mutex; @@ -635,19 +632,37 @@ struct drm_connector { * Protected by @mutex. */ bool registered; - struct list_head modes; /* list of modes on this connector */ + /** + * @modes: + * Modes available on this connector (from fill_modes() + user). + * Protected by &drm_mode_config.mutex. + */ + struct list_head modes; + + /** + * @status: + * One of the drm_connector_status enums (connected, not, or unknown). + * Protected by &drm_mode_config.mutex. + */ enum drm_connector_status status; - /* these are modes added by probing with DDC or the BIOS */ + /** + * @probed_modes: + * These are modes added by probing with DDC or the BIOS, before + * filtering is applied. Used by the probe helpers. Protected by + * &drm_mode_config.mutex. + */ struct list_head probed_modes; /** * @display_info: Display information is filled from EDID information * when a display is detected. For non hot-pluggable displays such as * flat panels in embedded systems, the driver should initialize the - * display_info.width_mm and display_info.height_mm fields with the - * physical size of the display. 
+ * &drm_display_info.width_mm and &drm_display_info.height_mm fields + * with the physical size of the display. + * + * Protected by &drm_mode_config.mutex. */ struct drm_display_info display_info; const struct drm_connector_funcs *funcs; @@ -853,12 +868,46 @@ void drm_mode_put_tile_group(struct drm_device *dev, * @dev: the DRM device * * Iterate over all connectors of @dev. + * + * WARNING: + * + * This iterator is not safe against hotadd/removal of connectors and is + * deprecated. Use drm_for_each_connector_iter() instead. */ #define drm_for_each_connector(connector, dev) \ - for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \ - connector = list_first_entry(&(dev)->mode_config.connector_list, \ - struct drm_connector, head); \ - &connector->head != (&(dev)->mode_config.connector_list); \ - connector = list_next_entry(connector, head)) + list_for_each_entry(connector, &(dev)->mode_config.connector_list, head) + +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_get(), drm_connector_list_iter_put() and + * drm_connector_list_iter_next() respectively the convenience macro + * drm_for_each_connector_iter(). + */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_get(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_put(struct drm_connector_list_iter *iter); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * Note that @connector is only valid within the list body, if you want to use + * @connector after calling drm_connector_list_iter_put() then you need to grab + * your own reference first using drm_connector_reference(). + */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 946672f97e1e..8f0b195e4a59 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -39,7 +39,6 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modes.h> #include <drm/drm_connector.h> -#include <drm/drm_encoder.h> #include <drm/drm_property.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -68,14 +67,12 @@ static inline uint64_t I642U64(int64_t val) } struct drm_crtc; -struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; struct drm_atomic_state; struct drm_crtc_helper_funcs; -struct drm_encoder_helper_funcs; struct drm_plane_helper_funcs; /** @@ -84,8 +81,8 @@ struct drm_plane_helper_funcs; * @enable: whether the CRTC should be enabled, gates all other state * @active: whether the CRTC is actively displaying (used for DPMS) * @planes_changed: planes on this crtc are updated - * @mode_changed: crtc_state->mode or crtc_state->enable has been changed - * @active_changed: crtc_state->active has been toggled. + * @mode_changed: @mode or @enable has been changed + * @active_changed: @active has been toggled. 
* @connectors_changed: connectors to this crtc have been updated * @zpos_changed: zpos values of planes on this crtc have been updated * @color_mgmt_changed: color management properties have changed (degamma or @@ -93,8 +90,6 @@ struct drm_plane_helper_funcs; * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders - * @last_vblank_count: for helpers and drivers to capture the vblank of the - * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings * @mode: current mode timings * @mode_blob: &drm_property_blob for @mode @@ -107,9 +102,10 @@ struct drm_plane_helper_funcs; * * Note that the distinction between @enable and @active is rather subtile: * Flipping @active while @enable is set without changing anything else may - * never return in a failure from the ->atomic_check callback. Userspace assumes - * that a DPMS On will always succeed. In other words: @enable controls resource - * assignment, @active controls the actual hardware state. + * never return in a failure from the &drm_mode_config_funcs.atomic_check + * callback. Userspace assumes that a DPMS On will always succeed. In other + * words: @enable controls resource assignment, @active controls the actual + * hardware state. * * The three booleans active_changed, connectors_changed and mode_changed are * intended to indicate whether a full modeset is needed, rather than strictly @@ -140,9 +136,6 @@ struct drm_crtc_state { u32 connector_mask; u32 encoder_mask; - /* last_vblank_count: for vblank waits before cleanup */ - u32 last_vblank_count; - /* adjusted_mode: for use by helpers and drivers */ struct drm_display_mode adjusted_mode; @@ -157,6 +150,15 @@ struct drm_crtc_state { struct drm_property_blob *gamma_lut; /** + * @target_vblank: + * + * Target vertical blank period when a page flip + * should take effect. + */ + + u32 target_vblank; + + /** * @event: * * Optional pointer to a DRM event to signal upon completion of the @@ -323,7 +325,7 @@ struct drm_crtc_funcs { * * This is the main legacy entry point to change the modeset state on a * CRTC. All the details of the desired configuration are passed in a - * struct &drm_mode_set - see there for details. + * &struct drm_mode_set - see there for details. * * Drivers implementing atomic modeset should use * drm_atomic_helper_set_config() to implement this hook. @@ -345,8 +347,8 @@ struct drm_crtc_funcs { * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application * requests a page flip the DRM core verifies that the new frame buffer * is large enough to be scanned out by the CRTC in the currently - * configured mode and then calls the CRTC ->page_flip() operation with a - * pointer to the new frame buffer. + * configured mode and then calls this hook with a pointer to the new + * frame buffer. * * The driver must wait for any pending rendering to the new framebuffer * to complete before executing the flip. It should also wait for any @@ -354,7 +356,7 @@ struct drm_crtc_funcs { * shared dma-buf. * * An application can request to be notified when the page flip has - * completed. The drm core will supply a struct &drm_event in the event + * completed. The drm core will supply a &struct drm_event in the event * parameter in this case. 
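Editor's note: the drm_connector.h hunk earlier in this diff deprecates drm_for_each_connector() and adds a hotplug-safe iterator (drm_connector_list_iter_get/next/put plus the drm_for_each_connector_iter() macro). A sketch of the walk in the shape the new kernel-doc suggests; foo_count_connectors and its loop body are illustrative.

```c
#include <drm/drm_connector.h>

/* Hypothetical walk over all connectors using the new iterator instead
 * of the deprecated drm_for_each_connector(). */
static unsigned int foo_count_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;
	unsigned int n = 0;

	drm_connector_list_iter_get(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		n++;	/* @connector is only valid inside the loop body */
	drm_connector_list_iter_put(&iter);

	return n;
}
```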
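Editor's note: the page-flip kernel-doc in this hunk says the supplied &struct drm_event can be handled with drm_crtc_send_vblank_event(). A hedged sketch of a flip-completion path doing exactly that; the foo_* interrupt handler is hypothetical, and the locking follows the usual dev->event_lock convention.

```c
#include <drm/drmP.h>

/* Hypothetical flip-done interrupt path: signal the pageflip event that
 * userspace asked for, under the DRM event lock. */
static void foo_handle_flip_done(struct drm_crtc *crtc,
				 struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	if (!event)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
```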
This can be handled by the * drm_crtc_send_vblank_event() function, which the driver should call on * the provided event upon completion of the flip. Note that if @@ -381,7 +383,7 @@ struct drm_crtc_funcs { * RETURNS: * * 0 on success or a negative error code on failure. Note that if a - * ->page_flip() operation is already pending the callback should return + * page flip operation is already pending the callback should return * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode * or just runtime disabled through DPMS respectively the new atomic * "ACTIVE" state) should result in an -EINVAL error code. Note that @@ -433,19 +435,19 @@ struct drm_crtc_funcs { * @atomic_duplicate_state: * * Duplicate the current atomic state for this CRTC and return it. - * The core and helpers gurantee that any atomic state duplicated with + * The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling ->atomic_commit() from struct - * &drm_mode_config_funcs) will be cleaned up by calling the - * @atomic_destroy_state hook in this structure. + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. * - * Atomic drivers which don't subclass struct &drm_crtc should use + * Atomic drivers which don't subclass &struct drm_crtc_state should use * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * - * It is an error to call this hook before crtc->state has been + * It is an error to call this hook before &drm_crtc.state has been * initialized correctly. * * NOTE: @@ -558,7 +560,7 @@ struct drm_crtc_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the crtc from - * late_unregister(). It is called from drm_dev_unregister(), + * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ @@ -591,7 +593,7 @@ struct drm_crtc_funcs { /** * @atomic_print_state: * - * If driver subclasses struct &drm_crtc_state, it should implement + * If driver subclasses &struct drm_crtc_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_crtc_print_state() @@ -639,8 +641,8 @@ struct drm_crtc { * * This provides a read lock for the overall crtc state (mode, dpms * state, ...) and a write lock for everything which can be update - * without a full modeset (fb, cursor data, crtc properties ...). Full - * modeset also need to grab dev->mode_config.connection_mutex. + * without a full modeset (fb, cursor data, crtc properties ...). A full + * modeset also need to grab &drm_mode_config.connection_mutex. */ struct drm_modeset_lock mutex; @@ -772,10 +774,8 @@ struct drm_crtc { * @connectors: array of connectors to drive with this CRTC if possible * @num_connectors: size of @connectors array * - * Represents a single crtc the connectors that it drives with what mode - * and from which framebuffer it scans out from. - * - * This is used to set modes. + * This represents a modeset configuration for the legacy SETCRTC ioctl and is + * also used internally. 
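Editor's note: the @atomic_duplicate_state kernel-doc above says drivers that subclass the state structure should use __drm_atomic_helper_crtc_duplicate_state() so shared state is duplicated consistently. A hedged sketch of that pattern; struct foo_crtc_state and its dither_mode field are hypothetical private state.

```c
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical subclassed CRTC state carrying a driver-private member. */
struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 dither_mode;	/* illustrative private field */
};

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Copies and fixes up the shared base state for us. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	cur = container_of(crtc->state, struct foo_crtc_state, base);
	state->dither_mode = cur->dither_mode;

	return &state->base;
}
```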
Atomic drivers instead use &drm_atomic_state. */ struct drm_mode_set { struct drm_framebuffer *fb; @@ -824,14 +824,21 @@ static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) return 1 << drm_crtc_index(crtc); } -void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, - int *hdisplay, int *vdisplay); int drm_crtc_force_disable(struct drm_crtc *crtc); int drm_crtc_force_disable_all(struct drm_device *dev); int drm_mode_set_config_internal(struct drm_mode_set *set); +struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); -/* Helpers */ +/** + * drm_crtc_find - look up a CRTC object from its ID + * @dev: DRM device + * @id: &drm_mode_object ID + * + * This can be used to look up a CRTC from its userspace ID. Only used by + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS + * userspace interface should be done using &drm_property. + */ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, uint32_t id) { @@ -840,21 +847,14 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, return mo ? obj_to_crtc(mo) : NULL; } +/** + * drm_for_each_crtc - iterate over all CRTCs + * @crtc: a &struct drm_crtc as the loop cursor + * @dev: the &struct drm_device + * + * Iterate over all CRTCs of @dev. + */ #define drm_for_each_crtc(crtc, dev) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) -static inline void -assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config) -{ - /* - * The connector hotadd/remove code currently grabs both locks when - * updating lists. Hence readers need only hold either of them to be - * safe and the check amounts to - * - * WARN_ON(not_holding(A) && not_holding(B)). - */ - WARN_ON(!mutex_is_locked(&mode_config->mutex) && - !drm_modeset_is_locked(&mode_config->connection_mutex)); -} - #endif /* __DRM_CRTC_H__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 982c299e435a..d026f5017c33 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); -extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 55bbeb0ff594..04681359a6f5 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -194,7 +194,8 @@ # define DP_PSR_SETUP_TIME_0 (6 << 1) # define DP_PSR_SETUP_TIME_MASK (7 << 1) # define DP_PSR_SETUP_TIME_SHIFT 1 - +# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */ +# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */ /* * 0x80-0x8f describe downstream port capabilities, but there are two layouts * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. 
If it was not, @@ -568,6 +569,16 @@ #define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ # define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) +#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */ +# define DP_GTC_CAP (1 << 0) /* DP 1.3 */ +# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */ +# define DP_AV_SYNC_CAP (1 << 2) /* DP 1.3 */ +# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED (1 << 3) /* DP 1.3 */ +# define DP_VSC_EXT_VESA_SDP_SUPPORTED (1 << 4) /* DP 1.4 */ +# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ +# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ + /* DP 1.2 Sideband message defines */ /* peer device type - DP 1.2a Table 2-92 */ #define DP_PEER_DEVICE_NONE 0x0 diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 003207670597..f4b4d154b98e 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -414,7 +414,7 @@ struct drm_dp_mst_topology_mgr { /** * @dev: device pointer for adding i2c devices etc. */ - struct device *dev; + struct drm_device *dev; /** * @cbs: callbacks for connector addition and destruction. */ @@ -493,8 +493,8 @@ struct drm_dp_mst_topology_mgr { int total_pbn; /** - * @qlock: protects @tx_msg_downq, the tx_slots in struct - * &drm_dp_mst_branch and txmsg->state once they are queued + * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and + * &drm_dp_sideband_msg_tx.state once they are queued */ struct mutex qlock; /** @@ -508,8 +508,7 @@ struct drm_dp_mst_topology_mgr { struct mutex payload_lock; /** * @proposed_vcpis: Array of pointers for the new VCPI allocation. The - * VCPI structure itself is embedded into the corresponding - * &drm_dp_mst_port structure. + * VCPI structure itself is &drm_dp_mst_port.vcpi. */ struct drm_dp_vcpi **proposed_vcpis; /** @@ -556,7 +555,10 @@ struct drm_dp_mst_topology_mgr { struct work_struct destroy_connector_work; }; -int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id); +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct drm_device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index c4fc49583dc0..5699f42195fe 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -64,16 +64,55 @@ struct drm_mode_create_dumb; * structure for GEM drivers. */ struct drm_driver { + + /** + * @load: + * + * Backward-compatible driver callback to complete + * initialization steps after the driver is registered. For + * this reason, may suffer from race conditions and its use is + * deprecated for new drivers. It is therefore only supported + * for existing drivers not yet converted to the new scheme. + * See drm_dev_init() and drm_dev_register() for proper and + * race-free way to set up a &struct drm_device. + * + * Returns: + * + * Zero on success, non-zero value on failure. 
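Editor's note: the new @load kernel-doc above deprecates the hook and points at drm_dev_init()/drm_dev_register() as the race-free scheme. A hedged probe sketch of that scheme using drm_dev_alloc(); foo_drm_driver and foo_hw_init() are hypothetical, and error handling is abbreviated.

```c
#include <linux/platform_device.h>
#include <drm/drm_drv.h>

/* Hypothetical platform probe doing all setup *before* the device becomes
 * visible to userspace, instead of relying on the deprecated ->load hook. */
static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = foo_hw_init(drm);		/* hypothetical hardware bring-up */
	if (ret)
		goto err_unref;

	ret = drm_dev_register(drm, 0);	/* publish only once fully set up */
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(drm);
	return ret;
}
```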
+ */ int (*load) (struct drm_device *, unsigned long flags); - int (*firstopen) (struct drm_device *); int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct drm_file *file_priv); void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); - int (*unload) (struct drm_device *); - int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - int (*dma_quiescent) (struct drm_device *); - int (*context_dtor) (struct drm_device *dev, int context); + + /** + * @unload: + * + * Reverse the effects of the driver load callback. Ideally, + * the clean up performed by the driver should happen in the + * reverse order of the initialization. Similarly to the load + * hook, this handler is deprecated and its usage should be + * dropped in favor of an open-coded teardown function at the + * driver layer. See drm_dev_unregister() and drm_dev_unref() + * for the proper way to remove a &struct drm_device. + * + * The unload() hook is called right after unregistering + * the device. + * + */ + void (*unload) (struct drm_device *); + + /** + * @release: + * + * Optional callback for destroying device data after the final + * reference is released, i.e. the device is being destroyed. Drivers + * using this callback are responsible for calling drm_dev_fini() + * to finalize the device and then freeing the struct themselves. + */ + void (*release) (struct drm_device *); + int (*set_busid)(struct drm_device *dev, struct drm_master *master); /** @@ -119,20 +158,6 @@ struct drm_driver { void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); /** - * @device_is_agp: - * - * Called by drm_device_is_agp(). Typically used to determine if a card - * is really attached to AGP or not. - * - * Returns: - * - * One of three values is returned depending on whether or not the - * card is absolutely not AGP (return of 0), absolutely is AGP - * (return of 1), or may or may not be AGP (return of 2). - */ - int (*device_is_agp) (struct drm_device *dev); - - /** * @get_scanout_position: * * Called by vblank timestamping code. @@ -282,7 +307,7 @@ struct drm_driver { /** * @gem_free_object_unlocked: deconstructor for drm_gem_objects * - * This is for drivers which are not encumbered with dev->struct_mutex + * This is for drivers which are not encumbered with &drm_device.struct_mutex * legacy locking schemes. Use this hook instead of @gem_free_object. */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); @@ -327,9 +352,6 @@ struct drm_driver { int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); - /* vga arb irq handler */ - void (*vgaarb_irq)(struct drm_device *dev, bool state); - /** * @dumb_create: * @@ -398,13 +420,20 @@ struct drm_driver { char *date; u32 driver_features; - int dev_priv_size; const struct drm_ioctl_desc *ioctls; int num_ioctls; const struct file_operations *fops; + /* Everything below here is for legacy driver, never use! */ + /* private: */ + /* List of devices hanging off this driver with stealth attach. 
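For contrast with the deprecated @load path documented above, a hedged sketch of the race-free setup it recommends; foo_driver and foo_hw_init() are hypothetical driver pieces:

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* Everything @load used to do happens here, before userspace
	 * can see the device; no registration race. */
	ret = foo_hw_init(drm);
	if (ret)
		goto err_unref;

	/* Only now publish the device to userspace. */
	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unref;

	return 0;

err_unref:
	drm_dev_unref(drm);
	return ret;
}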
*/ struct list_head legacy_dev_list; + int (*firstopen) (struct drm_device *); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + int dev_priv_size; }; extern __printf(6, 7) @@ -419,6 +448,8 @@ extern unsigned int drm_debug; int drm_dev_init(struct drm_device *dev, struct drm_driver *driver, struct device *parent); +void drm_dev_fini(struct drm_device *dev); + struct drm_device *drm_dev_alloc(struct drm_driver *driver, struct device *parent); int drm_dev_register(struct drm_device *dev, unsigned long flags); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 38eabf65f19d..577d5063e63d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -24,6 +24,7 @@ #define __DRM_EDID_H__ #include <linux/types.h> +#include <linux/hdmi.h> struct drm_device; struct i2c_adapter; @@ -248,6 +249,7 @@ struct detailed_timing { # define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ #define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f # define DRM_ELD_SPEAKER_RLRC (1 << 6) # define DRM_ELD_SPEAKER_FLRC (1 << 5) # define DRM_ELD_SPEAKER_RC (1 << 4) @@ -322,8 +324,6 @@ struct cea_sad { struct drm_encoder; struct drm_connector; struct drm_display_mode; -struct hdmi_avi_infoframe; -struct hdmi_vendor_infoframe; void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); @@ -346,6 +346,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); +void +drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, + const struct drm_display_mode *mode, + enum hdmi_quantization_range rgb_quant_range, + bool rgb_quant_range_selectable); /** * drm_eld_mnl - Get ELD monitor name length in bytes. @@ -414,6 +419,18 @@ static inline int drm_eld_size(const uint8_t *eld) } /** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. 
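A short usage sketch for the speaker-allocation helper documented above; the predicate name is made up, the DRM_ELD_SPEAKER_* bits are real:

static bool foo_sink_has_lfe(const u8 *eld)
{
	/* drm_eld_get_spk_alloc() masks off the reserved top bit,
	 * leaving only DRM_ELD_SPEAKER_* flags. */
	return drm_eld_get_spk_alloc(eld) & DRM_ELD_SPEAKER_LFE;
}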
+ */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** * drm_eld_get_conn_type - Get device type hdmi/dp connected * @eld: pointer to an ELD memory structure * @@ -442,6 +459,8 @@ enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); bool drm_detect_hdmi_monitor(struct edid *edid); bool drm_detect_monitor_audio(struct edid *edid); bool drm_rgb_quant_range_selectable(struct edid *edid); +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode); int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay); void drm_set_preferred_mode(struct drm_connector *connector, diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index c7438ff0d609..8d8245ec0181 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -25,8 +25,12 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> #include <drm/drm_mode_object.h> +struct drm_encoder; + /** * struct drm_encoder_funcs - encoder controls * @@ -71,7 +75,7 @@ struct drm_encoder_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the encoder from - * late_unregister(). It is called from drm_dev_unregister(), + * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ @@ -188,9 +192,6 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) return encoder->index; } -/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */ -static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc); - /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
* @encoder: encoder to test diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 82cdf611393d..1107b4b1c599 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 3b00f6480b83..a5ecc0a58260 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,19 +16,17 @@ struct drm_plane; struct drm_plane_state; struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int num_crtc, - unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs); + unsigned int preferred_bpp, unsigned int max_conn_count, + const struct drm_framebuffer_funcs *funcs); struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, - unsigned int preferred_bpp, unsigned int num_crtc, - unsigned int max_conn_count); + unsigned int preferred_bpp, unsigned int max_conn_count); void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state); -int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes, - const struct drm_framebuffer_funcs *funcs); +void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, + int state); void drm_fb_cma_destroy(struct drm_framebuffer *fb); int drm_fb_cma_create_handle(struct drm_framebuffer *fb, diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 975deedd593e..6f5acebb266a 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -181,7 +181,7 @@ struct drm_fb_helper_connector { * * This is the main structure used by the fbdev helpers. Drivers supporting * fbdev emulation should embedded this into their overall driver structure. - * Drivers must also fill out a struct &drm_fb_helper_funcs with a few + * Drivers must also fill out a &struct drm_fb_helper_funcs with a few * operations. 
*/ struct drm_fb_helper { @@ -236,8 +236,7 @@ struct drm_fb_helper { void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int crtc_count, - int max_conn); + struct drm_fb_helper *helper, int max_conn); void drm_fb_helper_fini(struct drm_fb_helper *helper); int drm_fb_helper_blank(int blank, struct fb_info *info); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, @@ -295,8 +294,7 @@ struct drm_display_mode * drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height); struct drm_display_mode * -drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, - int width, int height); +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, @@ -309,7 +307,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev, } static inline int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int crtc_count, + struct drm_fb_helper *helper, int max_conn) { return 0; diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index d387cf06ae05..21c3d512d25c 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -54,7 +54,7 @@ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** * struct drm_flip_task - flip work task * @node: list entry element - * @data: data to pass to work->func + * @data: data to pass to &drm_flip_work.func */ struct drm_flip_task { struct list_head node; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index a232e7f0c869..dd1e3e99dcff 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -40,8 +40,8 @@ struct drm_framebuffer_funcs { * * Clean up framebuffer resources, specifically also unreference the * backing storage. The core guarantees to call this function for every - * framebuffer successfully created by ->fb_create() in - * &drm_mode_config_funcs. Drivers must also call + * framebuffer successfully created by calling + * &drm_mode_config_funcs.fb_create. Drivers must also call * drm_framebuffer_cleanup() to release DRM core resources for this * framebuffer. */ @@ -51,7 +51,7 @@ struct drm_framebuffer_funcs { * @create_handle: * * Create a buffer handle in the driver-specific buffer manager (either - * GEM or TTM) valid for the passed-in struct &drm_file. This is used by + * GEM or TTM) valid for the passed-in &struct drm_file. This is used by * the core to implement the GETFB IOCTL, which returns (for * sufficiently priviledged user) also a native buffer handle. This can * be used for seamless transitions between modesetting clients by @@ -112,8 +112,8 @@ struct drm_framebuffer { */ struct drm_device *dev; /** - * @head: Place on the dev->mode_config.fb_list, access protected by - * dev->mode_config.fb_lock. + * @head: Place on the &drm_mode_config.fb_list, access protected by + * &drm_mode_config.fb_lock. 
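Against the slimmed-down drm_fb_helper_init() signature above (the crtc_count argument is gone), fbdev setup reduces to roughly the following sketch; foo_fb_helper_funcs is hypothetical:

static int foo_fbdev_init(struct drm_device *dev,
			  struct drm_fb_helper *helper)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, &foo_fb_helper_funcs);

	/* Only the connector limit is passed now; the CRTC count is
	 * derived from the device itself. */
	ret = drm_fb_helper_init(dev, helper,
				 dev->mode_config.num_connector);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}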
*/ struct list_head head; @@ -122,6 +122,10 @@ struct drm_framebuffer { */ struct drm_mode_object base; /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** * @funcs: framebuffer vfunc table */ const struct drm_framebuffer_funcs *funcs; @@ -145,7 +149,7 @@ struct drm_framebuffer { * * This should not be used to specifiy x/y pixel offsets into the buffer * data (even for linear buffers). Specifying an x/y pixel offset is - * instead done through the source rectangle in struct &drm_plane_state. + * instead done through the source rectangle in &struct drm_plane_state. */ unsigned int offsets[4]; /** @@ -166,28 +170,11 @@ struct drm_framebuffer { */ unsigned int height; /** - * @depth: Depth in bits per pixel for RGB formats. 0 for everything - * else. Legacy information derived from @pixel_format, it's suggested to use - * the DRM FOURCC codes and helper functions directly instead. - */ - unsigned int depth; - /** - * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for - * everything else. Legacy information derived from @pixel_format, it's - * suggested to use the DRM FOURCC codes and helper functions directly - * instead. - */ - int bits_per_pixel; - /** * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or * DRM_MODE_FB_MODIFIERS. */ int flags; /** - * @pixel_format: DRM FOURCC code describing the pixel format. - */ - uint32_t pixel_format; /* fourcc format */ - /** * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR * universal plane. @@ -200,8 +187,7 @@ struct drm_framebuffer { */ int hot_y; /** - * @filp_head: Placed on struct &drm_file fbs list_head, protected by - * fbs_lock in the same structure. + * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ struct list_head filp_head; }; @@ -273,8 +259,8 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, * @fb: the loop cursor * @dev: the DRM device * - * Iterate over all framebuffers of @dev. User must hold the fb_lock from - * &drm_mode_config. + * Iterate over all framebuffers of @dev. User must hold + * &drm_mode_config.fb_lock. */ #define drm_for_each_fb(fb, dev) \ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ @@ -282,4 +268,10 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, struct drm_framebuffer, head); \ &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + #endif diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 9f63736e6163..449a41b56ffc 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -63,7 +63,7 @@ struct drm_gem_object { * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * - * Protected by dev->object_name_lock. + * Protected by &drm_device.object_name_lock. */ unsigned handle_count; @@ -106,8 +106,8 @@ struct drm_gem_object { * @name: * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK - * and GEM_OPEN ioctls. + * Access is covered by &drm_device.object_name_lock. This is used by + * the GEM_FLINK and GEM_OPEN ioctls. */ int name; @@ -150,7 +150,7 @@ struct drm_gem_object { * through importing or exporting). 
We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by obj->object_name_lock. + * Protected by &drm_device.object_name_lock. */ struct dma_buf *dma_buf; @@ -163,7 +163,7 @@ struct drm_gem_object { * attachment point for the device. This is invariant over the lifetime * of a gem object. * - * The driver's ->gem_free_object callback is responsible for cleaning + * The &drm_driver.gem_free_object callback is responsible for cleaning * up the dma_buf attachment and references acquired at import time. * * Note that the drm gem/prime core does not depend upon drivers setting @@ -204,7 +204,7 @@ drm_gem_object_reference(struct drm_gem_object *obj) * @obj: GEM buffer object * * This function is meant to be used by drivers which are not encumbered with - * dev->struct_mutex legacy locking and which are using the + * &drm_device.struct_mutex legacy locking and which are using the * gem_free_object_unlocked callback. It avoids all the locking checks and * locking overhead of drm_gem_object_unreference() and * drm_gem_object_unreference_unlocked(). @@ -212,8 +212,8 @@ drm_gem_object_reference(struct drm_gem_object *obj) * Drivers should never call this directly in their code. Instead they should * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object * *obj)`` wrapper function, and use that. Shared code should never call this, to - * avoid breaking drivers by accident which still depend upon dev->struct_mutex - * locking. + * avoid breaking drivers by accident which still depend upon + * &drm_device.struct_mutex locking. */ static inline void __drm_gem_object_unreference(struct drm_gem_object *obj) diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index acd6af8a8e67..2abcd5190cc1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -53,6 +53,23 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, extern const struct vm_operations_struct drm_gem_cma_vm_ops; +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#else +static inline unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + return -EINVAL; +} +#endif + #ifdef CONFIG_DEBUG_FS void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); #endif diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h index 293d08caab60..2fb880462a57 100644 --- a/include/drm/drm_irq.h +++ b/include/drm/drm_irq.h @@ -51,8 +51,8 @@ struct drm_pending_vblank_event { * * Note that for historical reasons - the vblank handling code is still shared * with legacy/non-kms drivers - this is a free-standing structure not directly - * connected to struct &drm_crtc. But all public interface functions are taking - * a struct &drm_crtc to hide this implementation detail. + * connected to &struct drm_crtc. But all public interface functions are taking + * a &struct drm_crtc to hide this implementation detail. */ struct drm_vblank_crtc { /** @@ -67,7 +67,7 @@ struct drm_vblank_crtc { * @disable_timer: Disable timer for the delayed vblank disabling * hysteresis logic. Vblank disabling is controlled through the * drm_vblank_offdelay module option and the setting of the - * max_vblank_count value in the &drm_device structure. + * &drm_device.max_vblank_count value. 
*/ struct timer_list disable_timer; @@ -92,7 +92,7 @@ */ atomic_t refcount; /* number of users of vblank interruptsper crtc */ /** - * @last: Protected by dev->vbl_lock, used for wraparound handling. + * @last: Protected by &drm_device.vbl_lock, used for wraparound handling. */ u32 last; /** diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 0b8371795aeb..2ef16bf25826 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -1,6 +1,7 @@ /************************************************************************** * * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. + * Copyright 2016 Intel Corporation * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -39,50 +40,132 @@ #include <linux/bug.h> #include <linux/rbtree.h> #include <linux/kernel.h> +#include <linux/mm_types.h> #include <linux/list.h> #include <linux/spinlock.h> -#ifdef CONFIG_DEBUG_FS -#include <linux/seq_file.h> -#endif #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #endif +#include <drm/drm_print.h> -enum drm_mm_search_flags { - DRM_MM_SEARCH_DEFAULT = 0, - DRM_MM_SEARCH_BEST = 1 << 0, - DRM_MM_SEARCH_BELOW = 1 << 1, -}; +#ifdef CONFIG_DRM_DEBUG_MM +#define DRM_MM_BUG_ON(expr) BUG_ON(expr) +#else +#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif -enum drm_mm_allocator_flags { - DRM_MM_CREATE_DEFAULT = 0, - DRM_MM_CREATE_TOP = 1 << 0, -}; +/** + * enum drm_mm_insert_mode - control search and allocation behaviour + * + * The &struct drm_mm range manager supports finding a suitable hole using + * a number of search trees. These trees are organised by size, by address and + * in most recent eviction order. This allows the user to find either the + * smallest hole to reuse, the lowest or highest address to reuse, or simply + * reuse the most recent eviction that fits. When allocating the &drm_mm_node + * from within the hole, the &drm_mm_insert_mode also dictates whether to + * allocate the lowest matching address or the highest. + */ +enum drm_mm_insert_mode { + /** + * @DRM_MM_INSERT_BEST: + * + * Search for the smallest hole (within the search range) that fits + * the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_BEST = 0, + + /** + * @DRM_MM_INSERT_LOW: + * + * Search for the lowest hole (address closest to 0, within the search + * range) that fits the desired node. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_LOW, -#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT -#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP + /** + * @DRM_MM_INSERT_HIGH: + * + * Search for the highest hole (address closest to U64_MAX, within the + * search range) that fits the desired node. + * + * Allocates the node from the *top* of the found hole. The specified + * alignment for the node is applied to the base of the node + * (&drm_mm_node.start). + */ + DRM_MM_INSERT_HIGH, + /** + * @DRM_MM_INSERT_EVICT: + * + * Search for the most recently evicted hole (within the search range) + * that fits the desired node. This is appropriate for use immediately + * after performing an eviction scan (see drm_mm_scan_init()) and + * removing the selected nodes to form a hole. + * + * Allocates the node from the bottom of the found hole. + */ + DRM_MM_INSERT_EVICT, +}; + +/** + * struct drm_mm_node - allocated block in the DRM allocator + * + * This represents an allocated block in a &drm_mm allocator.
Except for + * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is + * entirely opaque and should only be accessed through the provided functions. + * Since allocation of these nodes is entirely handled by the driver they can be + * embedded. + */ struct drm_mm_node { - struct list_head node_list; - struct list_head hole_stack; - struct rb_node rb; - unsigned hole_follows : 1; - unsigned scanned_block : 1; - unsigned scanned_prev_free : 1; - unsigned scanned_next_free : 1; - unsigned scanned_preceeds_hole : 1; - unsigned allocated : 1; + /** @color: Opaque driver-private tag. */ unsigned long color; + /** @start: Start address of the allocated block. */ u64 start; + /** @size: Size of the allocated block. */ u64 size; - u64 __subtree_last; + /* private: */ struct drm_mm *mm; + struct list_head node_list; + struct list_head hole_stack; + struct rb_node rb; + struct rb_node rb_hole_size; + struct rb_node rb_hole_addr; + u64 __subtree_last; + u64 hole_size; + bool allocated : 1; + bool scanned_block : 1; #ifdef CONFIG_DRM_DEBUG_MM depot_stack_handle_t stack; #endif }; +/** + * struct drm_mm - DRM allocator + * + * DRM range allocator with a few special functions and features geared towards + * managing GPU memory. Except for the @color_adjust callback the structure is + * entirely opaque and should only be accessed through the provided functions + * and macros. This structure can be embedded into larger driver structures. + */ struct drm_mm { + /** + * @color_adjust: + * + * Optional driver callback to further apply restrictions on a hole. The + * node argument points at the node containing the hole from which the + * block would be allocated (see drm_mm_hole_follows() and friends). The + * other arguments are the size of the block to be allocated. The driver + * can adjust the start and end as needed to e.g. insert guard pages. + */ + void (*color_adjust)(const struct drm_mm_node *node, + unsigned long color, + u64 *start, u64 *end); + + /* private: */ /* List of all memory nodes that immediately precede a free hole. */ struct list_head hole_stack; /* head_node.node_list is the list of all memory nodes, ordered @@ -90,33 +173,53 @@ struct drm_mm { struct drm_mm_node head_node; /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ struct rb_root interval_tree; + struct rb_root holes_size; + struct rb_root holes_addr; - unsigned int scan_check_range : 1; - unsigned scan_alignment; - unsigned long scan_color; - u64 scan_size; - u64 scan_hit_start; - u64 scan_hit_end; - unsigned scanned_blocks; - u64 scan_start; - u64 scan_end; - struct drm_mm_node *prev_scanned_node; - - void (*color_adjust)(struct drm_mm_node *node, unsigned long color, - u64 *start, u64 *end); + unsigned long scan_active; +}; + +/** + * struct drm_mm_scan - DRM allocator eviction roster data + * + * This structure tracks data needed for the eviction roster set up using + * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and + * drm_mm_scan_remove_block(). The structure is entirely opaque and should only + * be accessed through the provided functions and macros. It is meant to be + * allocated temporarily by the driver on the stack.
+ */ +struct drm_mm_scan { + /* private: */ + struct drm_mm *mm; + + u64 size; + u64 alignment; + u64 remainder_mask; + + u64 range_start; + u64 range_end; + + u64 hit_start; + u64 hit_end; + + unsigned long color; + enum drm_mm_insert_mode mode; }; /** * drm_mm_node_allocated - checks whether a node is allocated * @node: drm_mm_node to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers are required to clear a node prior to using it with the + * drm_mm range manager. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @node is allocated. */ -static inline bool drm_mm_node_allocated(struct drm_mm_node *node) +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) { return node->allocated; } @@ -125,18 +228,38 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node) * drm_mm_initialized - checks whether an allocator is initialized * @mm: drm_mm to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @mm is initialized. */ -static inline bool drm_mm_initialized(struct drm_mm *mm) +static inline bool drm_mm_initialized(const struct drm_mm *mm) { return mm->hole_stack.next; } -static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. See also drm_mm_hole_node_start() and + * drm_mm_hole_node_end(). + * + * Returns: + * True if a hole follows the @node. + */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_size; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -145,20 +268,20 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) * drm_mm_hole_node_start - computes the start of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() * * Returns: * Start of the subsequent hole. 
*/ -static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { - BUG_ON(!hole_node->hole_follows); + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); return __drm_mm_hole_node_start(hole_node); } -static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return list_next_entry(hole_node, node_list)->start; } @@ -167,148 +290,162 @@ static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) * drm_mm_hole_node_end - computes the end of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). * * Returns: * End of the subsequent hole. */ -static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } /** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manager + * + * As the drm_mm range manager hides its node_list deep within its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. + */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** * drm_mm_for_each_node - iterator to walk over all allocated nodes - * @entry: drm_mm_node structure to assign to in each iteration step - * @mm: drm_mm allocator to walk + * @entry: &struct drm_mm_node to assign to in each iteration step + * @mm: &drm_mm allocator to walk * * This iterator walks over all nodes in the range allocator. It is implemented - * with list_for_each, so not save against removal of elements. + * with list_for_each(), so not safe against removal of elements. */ -#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ - &(mm)->head_node.node_list, \ - node_list) - -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ - for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: &struct drm_mm_node to assign to in each iteration step + * @next: &struct drm_mm_node to store the next step + * @mm: &drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe(), so safe against removal of elements.
+ */ +#define drm_mm_for_each_node_safe(entry, next, mm) \ + list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) /** * drm_mm_for_each_hole - iterator to walk over all holes - * @entry: drm_mm_node used internally to track progress - * @mm: drm_mm allocator to walk + * @pos: &drm_mm_node used internally to track progress + * @mm: &drm_mm allocator to walk * @hole_start: ulong variable to assign the hole start to on each iteration * @hole_end: ulong variable to assign the hole end to on each iteration * * This iterator walks over all holes in the range allocator. It is implemented - * with list_for_each, so not save against removal of elements. @entry is used + * with list_for_each(), so not safe against removal of elements. @pos is used * internally and will not reflect a real drm_mm_node for the very first hole. * Hence users of this iterator may not access it. * * Implementation Note: * We need to inline list_for_each_entry in order to be able to set hole_start * and hole_end on each iteration while keeping the macro sane. - * - * The __drm_mm_for_each_hole version is similar, but with added support for - * going backwards. */ -#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ - __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0) +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \ + for (pos = list_first_entry(&(mm)->hole_stack, \ + typeof(*pos), hole_stack); \ + &pos->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(pos), \ + hole_end = hole_start + pos->hole_size, \ + 1 : 0; \ + pos = list_next_entry(pos, hole_stack)) /* * Basic range manager support (drm_mm.c) */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); +int drm_mm_insert_node_in_range(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + u64 alignment, + unsigned long color, + u64 start, + u64 end, + enum drm_mm_insert_mode mode); -int drm_mm_insert_node_generic(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags sflags, - enum drm_mm_allocator_flags aflags); /** - * drm_mm_insert_node - search for space and insert @node + * drm_mm_insert_node_generic - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation * @alignment: alignment of the allocation - * @flags: flags to fine-tune the allocation + * @color: opaque tag value to use for this node + * @mode: fine-tune the allocation search and placement * - * This is a simplified version of drm_mm_insert_node_generic() with @color set - * to 0. + * This is a simplified version of drm_mm_insert_node_in_range() with no + * range restrictions applied. * * The preallocated node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole.
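Taken together, the consolidated insertion API above is used roughly like this; the 256 MiB range and block sizes are made up (SZ_* constants from <linux/sizes.h>):

static int foo_vram_init_and_alloc(struct drm_mm *mm,
				   struct drm_mm_node *node)
{
	/* Manage a [0, 256 MiB) address range. */
	drm_mm_init(mm, 0, SZ_256M);

	/* Nodes must be zeroed before their first insertion. */
	memset(node, 0, sizeof(*node));

	/* 64 KiB block, 4 KiB aligned, no color tag, searched over
	 * the whole range, allocated top-down. Returns -ENOSPC if
	 * no hole fits. */
	return drm_mm_insert_node_in_range(mm, node, SZ_64K, SZ_4K, 0,
					   0, U64_MAX,
					   DRM_MM_INSERT_HIGH);
}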
*/ -static inline int drm_mm_insert_node(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - enum drm_mm_search_flags flags) +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) { - return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, - DRM_MM_CREATE_DEFAULT); + return drm_mm_insert_node_in_range(mm, node, + size, alignment, color, + 0, U64_MAX, mode); } -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - unsigned long color, - u64 start, - u64 end, - enum drm_mm_search_flags sflags, - enum drm_mm_allocator_flags aflags); /** - * drm_mm_insert_node_in_range - ranged search for space and insert @node + * drm_mm_insert_node - search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation - * @alignment: alignment of the allocation - * @start: start of the allowed range for this node - * @end: end of the allowed range for this node - * @flags: flags to fine-tune the allocation * - * This is a simplified version of drm_mm_insert_node_in_range_generic() with - * @color set to 0. + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. * * The preallocated node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. */ -static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - u64 start, - u64 end, - enum drm_mm_search_flags flags) +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size) { - return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, - 0, start, end, flags, - DRM_MM_CREATE_DEFAULT); + return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); } void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -void drm_mm_init(struct drm_mm *mm, - u64 start, - u64 size); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); void drm_mm_takedown(struct drm_mm *mm); -bool drm_mm_clean(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it. 
+ */ +static inline bool drm_mm_clean(const struct drm_mm *mm) +{ + return list_empty(drm_mm_nodes(mm)); +} struct drm_mm_node * -__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last); +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); /** * drm_mm_for_each_node_in_range - iterator to walk over a range of @@ -329,22 +466,49 @@ __drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last); node__ && node__->start < (end__); \ node__ = list_next_entry(node__, node_list)) -void drm_mm_init_scan(struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color); -void drm_mm_init_scan_with_range(struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color, - u64 start, - u64 end); -bool drm_mm_scan_add_block(struct drm_mm_node *node); -bool drm_mm_scan_remove_block(struct drm_mm_node *node); - -void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); -#ifdef CONFIG_DEBUG_FS -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); -#endif +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color, + u64 start, u64 end, + enum drm_mm_insert_mode mode); + +/** + * drm_mm_scan_init - initialize lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @mode: fine-tune the allocation search and placement + * + * This is a simplified version of drm_mm_scan_init_with_range() with no range + * restrictions applied. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +static inline void drm_mm_scan_init(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + enum drm_mm_insert_mode mode) +{ + drm_mm_scan_init_with_range(scan, mm, + size, alignment, color, + 0, U64_MAX, mode); +} + +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); + +void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); #endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 137432386310..26ff46ab26fb 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -47,7 +47,7 @@ struct drm_mode_config_funcs { * * Create a new framebuffer object. The core does basic checks on the * requested metadata, but most of that is left to the driver. See - * struct &drm_mode_fb_cmd2 for details. + * &struct drm_mode_fb_cmd2 for details. * * If the parameters are deemed valid and the backing storage objects in * the underlying memory manager all exist, then the driver allocates @@ -132,10 +132,10 @@ struct drm_mode_config_funcs { * that before calling this hook. * * See the documentation of @atomic_commit for an exhaustive list of - * error conditions which don't have to be checked at the - * ->atomic_check() stage? + * error conditions which don't have to be checked in this + * callback. * - * See the documentation for struct &drm_atomic_state for how exactly + * See the documentation for &struct drm_atomic_state for how exactly * an atomic modeset update is described.
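Returning to the eviction-scan declarations above, the intended roster flow looks roughly like this; the LRU list and foo_obj container are made up, and in all outcomes every scanned block must later be passed to drm_mm_scan_remove_block(), with the blocks for which it returns true actually evicted to produce the hole (omitted here for brevity):

static bool foo_scan_for_hole(struct drm_mm *mm,
			      struct list_head *lru, u64 size)
{
	struct drm_mm_scan scan;
	struct foo_obj *obj;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);

	list_for_each_entry(obj, lru, lru_link) {
		/* Returns true once the accumulated blocks would
		 * form a hole large enough for the request. */
		if (drm_mm_scan_add_block(&scan, &obj->node))
			return true;
	}

	return false;
}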
* * Drivers using the atomic helpers can implement this hook using @@ -171,7 +171,7 @@ struct drm_mode_config_funcs { * calling this function, and that nothing has been changed in the * interim. * - * See the documentation for struct &drm_atomic_state for how exactly + * See the documentation for &struct drm_atomic_state for how exactly * an atomic modeset update is described. * * Drivers using the atomic helpers can implement this hook using @@ -198,10 +198,10 @@ struct drm_mode_config_funcs { * completed. These events are per-CRTC and can be distinguished by the * CRTC index supplied in &drm_event to userspace. * - * The drm core will supply a struct &drm_event in the event - * member of each CRTC's &drm_crtc_state structure. See the - * documentation for &drm_crtc_state for more details about the precise - * semantics of this event. + * The drm core will supply a &struct drm_event in each CRTC's + * &drm_crtc_state.event. See the documentation for + * &drm_crtc_state.event for more details about the precise semantics of + * this event. * * NOTE: * @@ -365,7 +365,13 @@ struct drm_mode_config { struct list_head fb_list; /** - * @num_connector: Number of connectors on this device. + * @connector_list_lock: Protects @num_connector and + * @connector_list. + */ + spinlock_t connector_list_lock; + /** + * @num_connector: Number of connectors on this device. Protected by + * @connector_list_lock. */ int num_connector; /** @@ -373,7 +379,9 @@ struct drm_mode_config { */ struct ida connector_ida; /** - * @connector_list: List of connector objects. + * @connector_list: List of connector objects. Protected by + * @connector_list_lock. Only use drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to walk this list. */ struct list_head connector_list; int num_encoder; diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index 43460b21d112..2c017adf6d74 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -86,10 +86,15 @@ struct drm_object_properties { * * Note that atomic drivers do not store mutable properties in this * array, but only the decoded values in the corresponding state - * structure. The decoding is done using the ->atomic_get_property and - * ->atomic_set_property hooks of the corresponding object. Hence atomic - * drivers should not use drm_object_property_set_value() and - * drm_object_property_get_value() on mutable objects, i.e. those + * structure. The decoding is done using the &drm_crtc_funcs.atomic_get_property and + * &drm_crtc_funcs.atomic_set_property hooks for &struct drm_crtc. For + * &struct drm_plane the hooks are &drm_plane_funcs.atomic_get_property and + * &drm_plane_funcs.atomic_set_property. And for &struct drm_connector + * the hooks are &drm_connector_funcs.atomic_get_property and + * &drm_connector_funcs.atomic_set_property. + * + * Hence atomic drivers should not use drm_object_property_set_value() + * and drm_object_property_get_value() on mutable objects, i.e. those + * without the DRM_MODE_PROP_IMMUTABLE flag set.
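Per the new @connector_list locking rules above, list walks go through the iterator; a sketch follows (the begin/end function names are taken from later kernels and may differ at this exact revision, and foo_handle_connector() is hypothetical):

static void foo_for_all_connectors(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* The iterator takes the references it needs and is
		 * safe against concurrent connector hotplug. */
		foo_handle_connector(connector);
	}
	drm_connector_list_iter_end(&iter);
}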
*/ uint64_t values[DRM_OBJECT_MAX_PROPERTY]; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 9934d91619c1..6dd34280e892 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -459,6 +459,8 @@ int of_get_drm_display_mode(struct device_node *np, void drm_mode_set_name(struct drm_display_mode *mode); int drm_mode_hsync(const struct drm_display_mode *mode); int drm_mode_vrefresh(const struct drm_display_mode *mode); +void drm_mode_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags); diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index b8051d5abe10..cb0ec92e11e6 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -27,7 +27,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *); -void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 69c3974bf133..091c42205667 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -30,6 +30,7 @@ #define __DRM_MODESET_HELPER_VTABLES_H__ #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * DOC: overview @@ -110,9 +111,9 @@ struct drm_crtc_helper_funcs { * This callback is used to validate a mode. The parameter mode is the * display mode that userspace requested, adjusted_mode is the mode the * encoders need to be fed with. Note that this is the inverse semantics - * of the meaning for the &drm_encoder and &drm_bridge - * ->mode_fixup() functions. If the CRTC cannot support the requested - * conversion from mode to adjusted_mode it should reject the modeset. + * of the meaning for the &drm_encoder and &drm_bridge_funcs.mode_fixup + * vfunc. If the CRTC cannot support the requested conversion from mode + * to adjusted_mode it should reject the modeset. * * This function is used by both legacy CRTC helpers and atomic helpers. * With atomic helpers it is optional. @@ -133,17 +134,18 @@ struct drm_crtc_helper_funcs { * * Also beware that neither core nor helpers filter modes before * passing them to the driver: While the list of modes that is - * advertised to userspace is filtered using the connector's - * ->mode_valid() callback, neither the core nor the helpers do any - * filtering on modes passed in from userspace when setting a mode. It - * is therefore possible for userspace to pass in a mode that was - * previously filtered out using ->mode_valid() or add a custom mode - * that wasn't probed from EDID or similar to begin with. Even though - * this is an advanced feature and rarely used nowadays, some users rely - * on being able to specify modes manually so drivers must be prepared - * to deal with it. Specifically this means that all drivers need not - * only validate modes in ->mode_valid() but also in ->mode_fixup() to - * make sure invalid modes passed in from userspace are rejected. + * advertised to userspace is filtered using the + * &drm_connector.mode_valid callback, neither the core nor the helpers + * do any filtering on modes passed in from userspace when setting a + * mode. 
It is therefore possible for userspace to pass in a mode that + * was previously filtered out using &drm_connector.mode_valid or add a + * custom mode that wasn't probed from EDID or similar to begin with. + * Even though this is an advanced feature and rarely used nowadays, + * some users rely on being able to specify modes manually so drivers + * must be prepared to deal with it. Specifically this means that all + * drivers need not only validate modes in &drm_connector.mode_valid but + * also in this or in the &drm_encoder_helper_funcs.mode_fixup callback + * to make sure invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -204,7 +206,7 @@ struct drm_crtc_helper_funcs { * optimized fast-path instead of a full mode set operation with all the * resulting flickering. If it is not present * drm_crtc_helper_set_config() will fall back to a full modeset, using - * the ->mode_set() callback. Since it can't update other planes it's + * the @mode_set callback. Since it can't update other planes it's * incompatible with atomic modeset support. * * This callback is only used by the CRTC helpers and deprecated. @@ -237,8 +239,7 @@ struct drm_crtc_helper_funcs { /** * @load_lut: * - * Load a LUT prepared with the @gamma_set functions from - * &drm_fb_helper_funcs. + * Load a LUT prepared with the &drm_fb_helper_funcs.gamma_set vfunc. * * This callback is optional and is only used by the fbdev emulation * helpers. @@ -256,10 +257,11 @@ struct drm_crtc_helper_funcs { * * This callback should be used to disable the CRTC. With the atomic * drivers it is called after all encoders connected to this CRTC have - * been shut off already using their own ->disable hook. If that - * sequence is too simple drivers can just add their own hooks and call - * it from this CRTC callback here by looping over all encoders - * connected to it using for_each_encoder_on_crtc(). + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to @@ -288,10 +290,10 @@ struct drm_crtc_helper_funcs { * * This callback should be used to enable the CRTC. With the atomic * drivers it is called before all encoders connected to this CRTC are - * enabled through the encoder's own ->enable hook. If that sequence is - * too simple drivers can just add their own hooks and call it from this - * CRTC callback here by looping over all encoders connected to it using - * for_each_encoder_on_crtc(). + * enabled through the encoder's own &drm_encoder_helper_funcs.enable + * hook. If that sequence is too simple drivers can just add their own + * hooks and call it from this CRTC callback here by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers, for symmetry with @disable. * Atomic drivers don't need to implement it if there's no need to @@ -315,16 +317,16 @@ struct drm_crtc_helper_funcs { * beforehand. This is calling order used by the default helper * implementation in drm_atomic_helper_check(). 
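As an aside on the double-validation requirement spelled out in the @mode_fixup text above, the minimal shape of such a hook is shown below; FOO_MAX_DOTCLOCK_KHZ is an invented hardware limit:

static bool foo_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Userspace may set custom modes that never went through
	 * the connector's mode_valid hook, so revalidate hardware
	 * limits here as well. */
	return adjusted_mode->clock <= FOO_MAX_DOTCLOCK_KHZ;
}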
* - * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check() - * hooks are called after the ones for planes, which allows drivers to - * assign shared resources requested by planes in the CRTC callback - * here. For more complicated dependencies the driver can call the provided - * check helpers multiple times until the computed state has a final - * configuration and everything has been checked. + * When using drm_atomic_helper_check_planes() this hook is called + * after the &drm_plane_helper_funcs.atomic_check hook for planes, which + * allows drivers to assign shared resources requested by planes in this + * callback here. For more complicated dependencies the driver can call + * the provided check helpers multiple times until the computed state + * has a final configuration and everything has been checked. * * This function is also allowed to inspect any other object's state and * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check&compute functions for + * be taken though to ensure that state check and compute functions for * these added states are all called, and derived state in other objects * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. @@ -399,10 +401,11 @@ struct drm_crtc_helper_funcs { * * This callback should be used to disable the CRTC. With the atomic * drivers it is called after all encoders connected to this CRTC have - * been shut off already using their own ->disable hook. If that - * sequence is too simple drivers can just add their own hooks and call - * it from this CRTC callback here by looping over all encoders - * connected to it using for_each_encoder_on_crtc(). + * been shut off already using their own + * &drm_encoder_helper_funcs.disable hook. If that sequence is too + * simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers. Atomic drivers don't * need to implement it if there's no need to disable anything at the @@ -482,16 +485,18 @@ struct drm_encoder_helper_funcs { * Also beware that neither core nor helpers filter modes before * passing them to the driver: While the list of modes that is * advertised to userspace is filtered using the connector's - * ->mode_valid() callback, neither the core nor the helpers do any - * filtering on modes passed in from userspace when setting a mode. It - * is therefore possible for userspace to pass in a mode that was - * previously filtered out using ->mode_valid() or add a custom mode - * that wasn't probed from EDID or similar to begin with. Even though - * this is an advanced feature and rarely used nowadays, some users rely - * on being able to specify modes manually so drivers must be prepared - * to deal with it. Specifically this means that all drivers need not - * only validate modes in ->mode_valid() but also in ->mode_fixup() to - * make sure invalid modes passed in from userspace are rejected. + * &drm_connector_helper_funcs.mode_valid callback, neither the core nor + * the helpers do any filtering on modes passed in from userspace when + * setting a mode. It is therefore possible for userspace to pass in a + * mode that was previously filtered out using + * &drm_connector_helper_funcs.mode_valid or add a custom mode that + * wasn't probed from EDID or similar to begin with.
Even though this + * is an advanced feature and rarely used nowadays, some users rely on + * being able to specify modes manually so drivers must be prepared to + * deal with it. Specifically this means that all drivers need not only + * validate modes in &drm_connector.mode_valid but also in this or in + * the &drm_crtc_helper_funcs.mode_fixup callback to make sure + * invalid modes passed in from userspace are rejected. * * RETURNS: * @@ -543,7 +548,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspend using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the ->enable() callback. + * encoder setup into the @enable callback. * * This callback is used both by the legacy CRTC helpers and the atomic * modeset helpers. It is optional in the atomic helpers. @@ -569,7 +574,7 @@ struct drm_encoder_helper_funcs { * use this hook, because the helper library calls it only once and not * every time the display pipeline is suspended using either DPMS or the * new "ACTIVE" property. Such drivers should instead move all their - * encoder setup into the ->enable() callback. + * encoder setup into the @enable callback. * * This callback is used by the atomic modeset helpers in place of the * @mode_set callback, if set by the driver. It is optional and should @@ -620,10 +625,10 @@ struct drm_encoder_helper_funcs { * * This callback should be used to disable the encoder. With the atomic * drivers it is called before this encoder's CRTC has been shut off - * using the CRTC's own ->disable hook. If that sequence is too simple - * drivers can just add their own driver private encoder hooks and call - * them from CRTC's callback by looping over all encoders connected to - * it using for_each_encoder_on_crtc(). + * using their own &drm_crtc_helper_funcs.disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). * * This hook is used both by legacy CRTC helpers and atomic helpers. * Atomic drivers don't need to implement it if there's no need to @@ -650,10 +655,10 @@ struct drm_encoder_helper_funcs { * * This callback should be used to enable the encoder. With the atomic * drivers it is called after this encoder's CRTC has been enabled using - * the CRTC's own ->enable hook. If that sequence is too simple drivers - * can just add their own driver private encoder hooks and call them - * from CRTC's callback by looping over all encoders connected to it - * using for_each_encoder_on_crtc(). + * their own &drm_crtc_helper_funcs.enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). * * This hook is used only by atomic helpers, for symmetry with @disable. * Atomic drivers don't need to implement it if there's no need to @@ -715,7 +720,7 @@ struct drm_connector_helper_funcs { * @get_modes: * * This function should fill in all modes currently valid for the sink - * into the connector->probed_modes list. It should also update the + * into the &drm_connector.probed_modes list. It should also update the * EDID property by calling drm_mode_connector_update_edid_property(). 
* * The usual way to implement this is to cache the EDID retrieved in the @@ -724,8 +729,9 @@ struct drm_connector_helper_funcs { * them by calling drm_add_edid_modes(). But connectors that drive a * fixed panel can also manually add specific modes using * drm_mode_probed_add(). Drivers which manually add modes should also - * make sure that the @display_info, @width_mm and @height_mm fields of the - * struct &drm_connector are filled in. + * make sure that the &drm_connector.display_info, + * &drm_connector.width_mm and &drm_connector.height_mm fields are + * filled in. * * Virtual drivers that just want some standard VESA mode with a given * resolution can call drm_add_modes_noedid(), and mark the preferred @@ -734,7 +740,7 @@ * Finally drivers that support audio probably want to update the ELD * data, too, using drm_edid_to_eld(). * - * This function is only called after the ->detect() hook has indicated + * This function is only called after the @detect hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. * @@ -767,8 +773,8 @@ * * RETURNS: * - * Either MODE_OK or one of the failure reasons in enum - * &drm_mode_status. + * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum + * drm_mode_status. */ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); @@ -874,7 +880,7 @@ struct drm_plane_helper_funcs { * RETURNS: * * 0 on success or one of the following negative error codes allowed by - * the atomic_commit hook in &drm_mode_config_funcs. When using helpers + * the &drm_mode_config_funcs.atomic_commit vfunc. When using helpers * this callback is the only one which can fail an atomic commit, * everything else must complete successfully. */ @@ -897,7 +903,7 @@ * * Drivers should check plane specific constraints in this hook. * - * When using drm_atomic_helper_check_planes() plane's ->atomic_check() + * When using drm_atomic_helper_check_planes() the planes' @atomic_check * hooks are called before the ones for CRTCs, which allows drivers to * request shared resources that the CRTC controls here. For more * complicated dependencies the driver can call the provided check helpers @@ -906,7 +912,7 @@ * * This function is also allowed to inspect any other object's state and * can add more state objects to the atomic commit if needed. Care must - * be taken though to ensure that state check&compute functions for + * be taken though to ensure that state check and compute functions for * these added states are all called, and derived state in other objects * all updated. Again the recommendation is to just call check helpers * until a maximal configuration is reached. @@ -935,8 +941,8 @@ * @atomic_update: * * Drivers should use this function to update the plane state. This - * hook is called in-between the ->atomic_begin() and - * ->atomic_flush() of &drm_crtc_helper_funcs. + * hook is called in-between the &drm_crtc_helper_funcs.atomic_begin and + * &drm_crtc_helper_funcs.atomic_flush callbacks. * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver @@ -952,14 +958,15 @@ * @atomic_disable: * * Drivers should use this function to unconditionally disable a plane.
- * This hook is called in-between the ->atomic_begin() and - * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to + * This hook is called in-between the + * &drm_crtc_helper_funcs.atomic_begin and + * &drm_crtc_helper_funcs.atomic_flush callbacks. It is an alternative to * @atomic_update, which will be called for disabling planes, too, if * the @atomic_disable hook isn't implemented. * * This hook is also useful to disable planes in preparation of a modeset, * by calling drm_atomic_helper_disable_planes_on_crtc() from the - * ->disable() hook in &drm_crtc_helper_funcs. + * &drm_crtc_helper_funcs.disable hook. * * Note that the power state of the display pipe when this function is * called depends upon the exact helpers and calling sequence the driver diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index d918ce45ec2c..96d39fbd12ca 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -64,7 +64,7 @@ struct drm_modeset_acquire_ctx { /** * struct drm_modeset_lock - used for locking modeset resources. * @mutex: resource locking - * @head: used to hold it's place on state->locked list when + * @head: used to hold its place on &drm_atomic_state.locked list when * part of an atomic update * * Used for locking CRTCs and other modeset resources. diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h index 86ab99bc0ac5..35e1482ba8a1 100644 --- a/include/drm/drm_os_linux.h +++ b/include/drm/drm_os_linux.h @@ -4,6 +4,7 @@ */ #include <linux/interrupt.h> /* For task queue support */ +#include <linux/sched/signal.h> #include <linux/delay.h> #ifndef readq diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 220d1e2b3db1..4b76cf2d5a7b 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -193,9 +193,9 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); int drm_panel_detach(struct drm_panel *panel); #ifdef CONFIG_OF -struct drm_panel *of_drm_find_panel(struct device_node *np); +struct drm_panel *of_drm_find_panel(const struct device_node *np); #else -static inline struct drm_panel *of_drm_find_panel(struct device_node *np) +static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) { return NULL; } diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index db3bbdeb36d5..20867b4371ab 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -247,19 +247,19 @@ struct drm_plane_funcs { * @atomic_duplicate_state: * * Duplicate the current atomic state for this plane and return it. - * The core and helpers gurantee that any atomic state duplicated with + * The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the - * driver by calling ->atomic_commit() from struct - * &drm_mode_config_funcs) will be cleaned up by calling the - * @atomic_destroy_state hook in this structure. + * driver by calling &drm_mode_config_funcs.atomic_commit) will be + * cleaned up by calling the @atomic_destroy_state hook in this + * structure. * - * Atomic drivers which don't subclass struct &drm_plane_state should use + * Atomic drivers which don't subclass &struct drm_plane_state should use * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers.
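 + *
 + * For the subclassing case a duplicate hook might look like this
 + * editor's sketch (struct example_plane_state and its extra member are
 + * hypothetical; the pattern relies on the base state being the first
 + * member of the subclass):
 + *
 + *	struct example_plane_state {
 + *		struct drm_plane_state base;
 + *		u32 extra;
 + *	};
 + *
 + *	static struct drm_plane_state *
 + *	example_plane_duplicate_state(struct drm_plane *plane)
 + *	{
 + *		struct example_plane_state *state;
 + *
 + *		state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
 + *		if (!state)
 + *			return NULL;
 + *
 + *		__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
 + *		return &state->base;
 + *	}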
* - * It is an error to call this hook before plane->state has been + * It is an error to call this hook before &drm_plane.state has been * initialized correctly. * * NOTE: @@ -372,7 +372,7 @@ struct drm_plane_funcs { * * This optional hook should be used to unregister the additional * userspace interfaces attached to the plane from - * late_unregister(). It is called from drm_dev_unregister(), + * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torn down. */ @@ -381,7 +381,7 @@ /** * @atomic_print_state: * - * If driver subclasses struct &drm_plane_state, it should implement + * If the driver subclasses &struct drm_plane_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_plane_print_state() @@ -423,8 +423,8 @@ enum drm_plane_type { * * Primary planes represent a "main" plane for a CRTC. Primary planes * are the planes operated upon by CRTC modesetting and flipping - * operations described in the page_flip and set_config hooks in struct - * &drm_crtc_funcs. + * operations described in the &drm_crtc_funcs.page_flip and + * &drm_crtc_funcs.set_config hooks. */ DRM_PLANE_TYPE_PRIMARY, @@ -470,9 +470,9 @@ struct drm_plane { /** * @mutex: * - * Protects modeset plane state, together with the mutex of &drm_crtc - * this plane is linked to (when active, getting actived or getting - * disabled). + * Protects modeset plane state, together with the &drm_crtc.mutex of + * the CRTC this plane is linked to (when active, getting activated or + * getting disabled). */ struct drm_modeset_lock mutex; @@ -580,7 +580,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, * * Iterate over all legacy planes of @dev, excluding primary and cursor planes. * This is useful for implementing userspace apis when userspace is not - * universal plane aware. See also enum &drm_plane_type. + * universal plane aware. See also &enum drm_plane_type. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 1adf84aea622..7d98763c0444 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -60,26 +60,27 @@ /** * struct drm_printer - drm output "stream" - * @printfn: actual output fxn - * @arg: output fxn specific data * * Do not use struct members directly. Use drm_seq_file_printer(), * drm_info_printer(), etc. to initialize. And drm_printf() for output.
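 + *
 + * A typical use, as an editor's sketch (the device pointer and the
 + * message are illustrative):
 + *
 + *	struct drm_printer p = drm_info_printer(drm->dev);
 + *
 + *	drm_printf(&p, "%d modes found\n", count);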
*/ struct drm_printer { + /* private: */ void (*printfn)(struct drm_printer *p, struct va_format *vaf); void *arg; + const char *prefix; }; void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); void drm_printf(struct drm_printer *p, const char *f, ...); /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file - * @f: the struct &seq_file to output to + * @f: the &struct seq_file to output to * * RETURNS: * The &drm_printer object @@ -95,7 +96,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) /** * drm_info_printer - construct a &drm_printer that outputs to dev_printk() - * @dev: the struct &device pointer + * @dev: the &struct device pointer * * RETURNS: * The &drm_printer object @@ -109,4 +110,19 @@ static inline struct drm_printer drm_info_printer(struct device *dev) return p; } +/** + * drm_debug_printer - construct a &drm_printer that outputs to pr_debug() + * @prefix: debug output prefix + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_debug_printer(const char *prefix) +{ + struct drm_printer p = { + .printfn = __drm_printfn_debug, + .prefix = prefix + }; + return p; +} #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 43c4b6a2046d..f66fdb47551c 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -30,7 +30,7 @@ /** * struct drm_property_enum - symbolic values for enumerations * @value: numeric property value for this enum entry - * @head: list of enum values, linked to enum_list in &drm_property + * @head: list of enum values, linked to &drm_property.enum_list * @name: symbolic name for the enum * * For enumeration and bitmask properties this structure stores the symbolic @@ -191,9 +191,9 @@ struct drm_property { * struct drm_property_blob - Blob data for &drm_property * @base: base KMS object * @dev: DRM device - * @head_global: entry on the global blob list in &drm_mode_config - * property_blob_list. - * @head_file: entry on the per-file blob list in &drm_file blobs list. + * @head_global: entry on the global blob list in + * &drm_mode_config.property_blob_list. + * @head_file: entry on the per-file blob list in &drm_file.blobs list. * @length: size of the blob in bytes, invariant over the lifetime of the object * @data: actual data, embedded at the end of this structure * diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 01a8436ccb0a..fffbb95a0915 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -10,6 +10,10 @@ #ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H #define __LINUX_DRM_SIMPLE_KMS_HELPER_H +#include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> +#include <drm/drm_plane.h> + struct drm_simple_display_pipe; /** @@ -73,9 +77,9 @@ struct drm_simple_display_pipe_funcs { /** * @prepare_fb: * - * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb . - * Please read the documentation for the ->prepare_fb hook in - * struct &drm_plane_helper_funcs for more details. + * Optional, called by &drm_plane_helper_funcs.prepare_fb. Please read + * the documentation for the &drm_plane_helper_funcs.prepare_fb hook for + * more details. 
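 + *
 + * For CMA-backed drivers this hook can often simply forward to the
 + * generic helper. An editor's sketch, assuming the CMA helper
 + * drm_fb_cma_prepare_fb() is available:
 + *
 + *	static int example_prepare_fb(struct drm_simple_display_pipe *pipe,
 + *				      struct drm_plane_state *plane_state)
 + *	{
 + *		return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
 + *	}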
*/ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -83,9 +87,9 @@ struct drm_simple_display_pipe_funcs { /** * @cleanup_fb: * - * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb . - * Please read the documentation for the ->cleanup_fb hook in - * struct &drm_plane_helper_funcs for more details. + * Optional, called by &drm_plane_helper_funcs.cleanup_fb. Please read + * the documentation for the &drm_plane_helper_funcs.cleanup_fb hook for + * more details. */ void (*cleanup_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -114,8 +118,6 @@ struct drm_simple_display_pipe { int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, struct drm_bridge *bridge); -void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe); - int drm_simple_display_pipe_init(struct drm_device *dev, struct drm_simple_display_pipe *pipe, const struct drm_simple_display_pipe_funcs *funcs, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 0d5f4268d75f..a1dd21d6b723 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -226,23 +226,18 @@ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ -#define INTEL_BDW_RSVDM_IDS(info) \ +#define INTEL_BDW_RSVD_IDS(info) \ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ - INTEL_VGA_DEVICE(0x163E, info) /* ULX */ - -#define INTEL_BDW_RSVDD_IDS(info) \ + INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ #define INTEL_BDW_IDS(info) \ INTEL_BDW_GT12_IDS(info), \ INTEL_BDW_GT3_IDS(info), \ - INTEL_BDW_RSVDM_IDS(info), \ - INTEL_BDW_GT12_IDS(info), \ - INTEL_BDW_GT3_IDS(info), \ - INTEL_BDW_RSVDD_IDS(info) + INTEL_BDW_RSVD_IDS(info) #define INTEL_CHV_IDS(info) \ INTEL_VGA_DEVICE(0x22b0, info), \ @@ -270,14 +265,14 @@ INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ + INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ #define INTEL_SKL_GT4_IDS(info) \ INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ - INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4e */ #define INTEL_SKL_IDS(info) \ INTEL_SKL_GT1_IDS(info), \ @@ -292,6 +287,10 @@ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + #define INTEL_KBL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index f49edecd66a3..b3bf717cfc45 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -3,8 +3,10 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, - phys_addr_t *mappable_base, u64 *mappable_end); +void intel_gtt_get(u64 *gtt_total, + u32 *stolen_size, + phys_addr_t *mappable_base, + u64 *mappable_end); int
intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel_lpe_audio.h new file mode 100644 index 000000000000..e9892b4c3af1 --- /dev/null +++ b/include/drm/intel_lpe_audio.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _INTEL_LPE_AUDIO_H_ +#define _INTEL_LPE_AUDIO_H_ + +#include <linux/types.h> +#include <linux/spinlock_types.h> + +struct platform_device; + +#define HDMI_MAX_ELD_BYTES 128 + +struct intel_hdmi_lpe_audio_eld { + int port_id; + int pipe_id; + unsigned char eld_data[HDMI_MAX_ELD_BYTES]; +}; + +struct intel_hdmi_lpe_audio_pdata { + bool notify_pending; + int tmds_clock_speed; + bool hdmi_connected; + bool dp_output; + int link_rate; + struct intel_hdmi_lpe_audio_eld eld; + void (*notify_audio_lpe)(struct platform_device *pdev); + spinlock_t lpe_audio_slock; +}; + +#endif /* _INTEL_LPE_AUDIO_H_ */ diff --git a/include/drm/tinydrm/ili9341.h b/include/drm/tinydrm/ili9341.h new file mode 100644 index 000000000000..807a09f43cad --- /dev/null +++ b/include/drm/tinydrm/ili9341.h @@ -0,0 +1,54 @@ +/* + * ILI9341 LCD controller + * + * Copyright 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ + +#ifndef __LINUX_ILI9341_H +#define __LINUX_ILI9341_H + +#define ILI9341_FRMCTR1 0xb1 +#define ILI9341_FRMCTR2 0xb2 +#define ILI9341_FRMCTR3 0xb3 +#define ILI9341_INVTR 0xb4 +#define ILI9341_PRCTR 0xb5 +#define ILI9341_DISCTRL 0xb6 +#define ILI9341_ETMOD 0xb7 + +#define ILI9341_PWCTRL1 0xc0 +#define ILI9341_PWCTRL2 0xc1 +#define ILI9341_VMCTRL1 0xc5 +#define ILI9341_VMCTRL2 0xc7 +#define ILI9341_PWCTRLA 0xcb +#define ILI9341_PWCTRLB 0xcf + +#define ILI9341_RDID1 0xda +#define ILI9341_RDID2 0xdb +#define ILI9341_RDID3 0xdc +#define ILI9341_RDID4 0xd3 + +#define ILI9341_PGAMCTRL 0xe0 +#define ILI9341_NGAMCTRL 0xe1 +#define ILI9341_DGAMCTRL1 0xe2 +#define ILI9341_DGAMCTRL2 0xe3 +#define ILI9341_DTCTRLA 0xe8 +#define ILI9341_DTCTRLB 0xea +#define ILI9341_PWRSEQ 0xed + +#define ILI9341_EN3GAM 0xf2 +#define ILI9341_IFCTRL 0xf6 +#define ILI9341_PUMPCTRL 0xf7 + +#define ILI9341_MADCTL_MH BIT(2) +#define ILI9341_MADCTL_BGR BIT(3) +#define ILI9341_MADCTL_ML BIT(4) +#define ILI9341_MADCTL_MV BIT(5) +#define ILI9341_MADCTL_MX BIT(6) +#define ILI9341_MADCTL_MY BIT(7) + +#endif /* __LINUX_ILI9341_H */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h new file mode 100644 index 000000000000..d137b16ee873 --- /dev/null +++ b/include/drm/tinydrm/mipi-dbi.h @@ -0,0 +1,107 @@ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include <drm/tinydrm/tinydrm.h> + +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI controller + * @tinydrm: tinydrm base + * @spi: SPI device + * @enabled: Pipeline is enabled + * @cmdlock: Command lock + * @command: Bus specific callback executing commands. + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + * @dc: Optional D/C gpio. + * @tx_buf: Buffer used for transfer (copy clip rect area) + * @tx_buf9: Buffer used for Option 1 9-bit conversion + * @tx_buf9_len: Size of tx_buf9. 
+ * @swap_bytes: Swap bytes in buffer before transfer + * @reset: Optional reset gpio + * @rotation: initial rotation in degrees Counter Clock Wise + * @backlight: backlight device (optional) + * @regulator: power regulator (optional) + */ +struct mipi_dbi { + struct tinydrm_device tinydrm; + struct spi_device *spi; + bool enabled; + struct mutex cmdlock; + int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num); + const u8 *read_commands; + struct gpio_desc *dc; + u16 *tx_buf; + void *tx_buf9; + size_t tx_buf9_len; + bool swap_bytes; + struct gpio_desc *reset; + unsigned int rotation; + struct backlight_device *backlight; + struct regulator *regulator; +}; + +static inline struct mipi_dbi * +mipi_dbi_from_tinydrm(struct tinydrm_device *tdev) +{ + return container_of(tdev, struct mipi_dbi, tinydrm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, + struct gpio_desc *dc, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, + const struct drm_display_mode *mode, + unsigned int rotation); +int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *pipe_funcs, + struct drm_driver *driver, + const struct drm_display_mode *mode, unsigned int rotation); +void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void mipi_dbi_hw_reset(struct mipi_dbi *mipi); +bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); + +int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); + +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @mipi: MIPI structure + * @cmd: Command + * @seq...: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(mipi, cmd, seq...) \ +({ \ + u8 d[] = { seq }; \ + mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \ +}) + +#ifdef CONFIG_DEBUG_FS +int mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +#define mipi_dbi_debugfs_init NULL +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h new file mode 100644 index 000000000000..9b9b6cfe3ba5 --- /dev/null +++ b/include/drm/tinydrm/tinydrm-helpers.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_TINYDRM_HELPERS_H +#define __LINUX_TINYDRM_HELPERS_H + +struct backlight_device; +struct tinydrm_device; +struct drm_clip_rect; +struct spi_transfer; +struct spi_message; +struct spi_device; +struct device; + +/** + * tinydrm_machine_little_endian - Machine is little endian + * + * Returns: + * true if *defined(__LITTLE_ENDIAN)*, false otherwise + */ +static inline bool tinydrm_machine_little_endian(void) +{ +#if defined(__LITTLE_ENDIAN) + return true; +#else + return false; +#endif +} + +bool tinydrm_merge_clips(struct drm_clip_rect *dst, + struct drm_clip_rect *src, unsigned int num_clips, + unsigned int flags, u32 max_width, u32 max_height); +void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb, + struct drm_clip_rect *clip); +void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr, + struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); + +struct backlight_device *tinydrm_of_find_backlight(struct device *dev); +int tinydrm_enable_backlight(struct backlight_device *backlight); +int tinydrm_disable_backlight(struct backlight_device *backlight); + +size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len); +bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw); +int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, + struct spi_transfer *header, u8 bpw, const void *buf, + size_t len); +void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m); + +#ifdef DEBUG +/** + * tinydrm_dbg_spi_message - Dump SPI message + * @spi: SPI device + * @m: SPI message + * + * Dumps info about the transfers in a SPI message including buffer content. + * DEBUG has to be defined for this function to be enabled alongside setting + * the DRM_UT_DRIVER bit of &drm_debug. + */ +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ + if (drm_debug & DRM_UT_DRIVER) + _tinydrm_dbg_spi_message(spi, m); +} +#else +static inline void tinydrm_dbg_spi_message(struct spi_device *spi, + struct spi_message *m) +{ +} +#endif /* DEBUG */ + +#endif /* __LINUX_TINYDRM_HELPERS_H */ diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h new file mode 100644 index 000000000000..cf9ca207b8b1 --- /dev/null +++ b/include/drm/tinydrm/tinydrm.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __LINUX_TINYDRM_H +#define __LINUX_TINYDRM_H + +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_simple_kms_helper.h> + +/** + * struct tinydrm_device - tinydrm device + * @drm: DRM device + * @pipe: Display pipe structure + * @dirty_lock: Serializes framebuffer flushing + * @fbdev_cma: CMA fbdev structure + * @suspend_state: Atomic state when suspended + * @fb_funcs: Framebuffer functions used when creating framebuffers + */ +struct tinydrm_device { + struct drm_device *drm; + struct drm_simple_display_pipe pipe; + struct mutex dirty_lock; + struct drm_fbdev_cma *fbdev_cma; + struct drm_atomic_state *suspend_state; + const struct drm_framebuffer_funcs *fb_funcs; +}; + +static inline struct tinydrm_device * +pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) +{ + return container_of(pipe, struct tinydrm_device, pipe); +} + +/** + * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations + * + * This macro provides a shortcut for setting the tinydrm GEM operations in + * the &drm_driver structure. + */ +#define TINYDRM_GEM_DRIVER_OPS \ + .gem_free_object = tinydrm_gem_cma_free_object, \ + .gem_vm_ops = &drm_gem_cma_vm_ops, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import = drm_gem_prime_import, \ + .gem_prime_export = drm_gem_prime_export, \ + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \ + .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \ + .gem_prime_vmap = drm_gem_cma_prime_vmap, \ + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ + .gem_prime_mmap = drm_gem_cma_prime_mmap, \ + .dumb_create = drm_gem_cma_dumb_create, \ + .dumb_map_offset = drm_gem_cma_dumb_map_offset, \ + .dumb_destroy = drm_gem_dumb_destroy, \ + .fops = &tinydrm_fops + +/** + * TINYDRM_MODE - tinydrm display mode + * @hd: Horizontal resolution, width + * @vd: Vertical resolution, height + * @hd_mm: Display width in millimeters + * @vd_mm: Display height in millimeters + * + * This macro creates a &drm_display_mode for use with tinydrm. 
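 + *
 + * A driver would typically use it to declare its fixed mode, along these
 + * lines (an editor's sketch; the name and the numbers are illustrative):
 + *
 + *	static const struct drm_display_mode example_mode = {
 + *		TINYDRM_MODE(320, 240, 58, 43),
 + *	};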
+ */ +#define TINYDRM_MODE(hd, vd, hd_mm, vd_mm) \ + .hdisplay = (hd), \ + .hsync_start = (hd), \ + .hsync_end = (hd), \ + .htotal = (hd), \ + .vdisplay = (vd), \ + .vsync_start = (vd), \ + .vsync_end = (vd), \ + .vtotal = (vd), \ + .width_mm = (hd_mm), \ + .height_mm = (vd_mm), \ + .type = DRM_MODE_TYPE_DRIVER, \ + .clock = 1 /* pass validation */ + +extern const struct file_operations tinydrm_fops; +void tinydrm_lastclose(struct drm_device *drm); +void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); +struct drm_gem_object * +tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); +int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, + const struct drm_framebuffer_funcs *fb_funcs, + struct drm_driver *driver); +int devm_tinydrm_register(struct tinydrm_device *tdev); +void tinydrm_shutdown(struct tinydrm_device *tdev); +int tinydrm_suspend(struct tinydrm_device *tdev); +int tinydrm_resume(struct tinydrm_device *tdev); + +void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +int +tinydrm_display_pipe_init(struct tinydrm_device *tdev, + const struct drm_simple_display_pipe_funcs *funcs, + int connector_type, + const uint32_t *formats, + unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation); + +#endif /* __LINUX_TINYDRM_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 9a465314572c..8f619f499e55 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -215,6 +215,8 @@ struct ttm_buffer_object { struct drm_vma_offset_node vma_node; + unsigned priority; + /** * Special members that are protected by the reserve lock * and the bo::lock when written to. Can be read with diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index feecf33a1212..8145773c582c 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -42,6 +42,8 @@ #include <linux/spinlock.h> #include <linux/reservation.h> +#define TTM_MAX_BO_PRIORITY 16U + struct ttm_backend_func { /** * struct ttm_backend_func member bind @@ -298,7 +300,7 @@ struct ttm_mem_type_manager { * Protected by the global->lru_lock. */ - struct list_head lru; + struct list_head lru[TTM_MAX_BO_PRIORITY]; /* * Protected by @move_lock. @@ -431,9 +433,15 @@ struct ttm_bo_driver { int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp); - /* hook to notify driver about a driver move so it - * can do tiling things */ + /** + * Hook to notify driver about a driver move so it + * can do tiling things and book-keeping. + * + * @evict: whether this move is evicting the buffer from the graphics + * address space + */ void (*move_notify)(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem); /* notify the driver we are taking a fault on this BO * and have reserved it */ @@ -454,18 +462,6 @@ struct ttm_bo_driver { struct ttm_mem_reg *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); - - /** - * Optional driver callback for when BO is removed from the LRU. - * Called with LRU lock held immediately before the removal. - */ - void (*lru_removal)(struct ttm_buffer_object *bo); - - /** - * Return the list_head after which a BO should be inserted in the LRU. 
- */ - struct list_head *(*lru_tail)(struct ttm_buffer_object *bo); - struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo); }; /** @@ -512,7 +508,7 @@ struct ttm_bo_global { /** * Protected by the lru_lock. */ - struct list_head swap_lru; + struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; /** * Internal protection. @@ -780,9 +776,6 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); -struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo); -struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo); - /** * __ttm_bo_reserve: * diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 360e00cefd35..a0c812b0fa39 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -64,3 +64,5 @@ #define BCM2835_CLOCK_CAM1 46 #define BCM2835_CLOCK_DSI0E 47 #define BCM2835_CLOCK_DSI1E 48 +#define BCM2835_CLOCK_DSI0P 49 +#define BCM2835_CLOCK_DSI1P 50 diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h deleted file mode 100644 index 7eed55100721..000000000000 --- a/include/dt-bindings/clock/exynos4415.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Device Tree binding constants for Samsung Exynos4415 clock controllers. - */ - -#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H -#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H - -/* - * Let each exported clock get a unique index, which is used on DT-enabled - * platforms to lookup the clock from a clock specifier. These indices are - * therefore considered an ABI and so must not be changed. This implies - * that new clocks should be added either in free spaces between clock groups - * or at the end. 
- */ - -/* - * Main CMU - */ - -#define CLK_OSCSEL 1 -#define CLK_FIN_PLL 2 -#define CLK_FOUT_APLL 3 -#define CLK_FOUT_MPLL 4 -#define CLK_FOUT_EPLL 5 -#define CLK_FOUT_G3D_PLL 6 -#define CLK_FOUT_ISP_PLL 7 -#define CLK_FOUT_DISP_PLL 8 - -/* Muxes */ -#define CLK_MOUT_MPLL_USER_L 16 -#define CLK_MOUT_GDL 17 -#define CLK_MOUT_MPLL_USER_R 18 -#define CLK_MOUT_GDR 19 -#define CLK_MOUT_EBI 20 -#define CLK_MOUT_ACLK_200 21 -#define CLK_MOUT_ACLK_160 22 -#define CLK_MOUT_ACLK_100 23 -#define CLK_MOUT_ACLK_266 24 -#define CLK_MOUT_G3D_PLL 25 -#define CLK_MOUT_EPLL 26 -#define CLK_MOUT_EBI_1 27 -#define CLK_MOUT_ISP_PLL 28 -#define CLK_MOUT_DISP_PLL 29 -#define CLK_MOUT_MPLL_USER_T 30 -#define CLK_MOUT_ACLK_400_MCUISP 31 -#define CLK_MOUT_G3D_PLLSRC 32 -#define CLK_MOUT_CSIS1 33 -#define CLK_MOUT_CSIS0 34 -#define CLK_MOUT_CAM1 35 -#define CLK_MOUT_FIMC3_LCLK 36 -#define CLK_MOUT_FIMC2_LCLK 37 -#define CLK_MOUT_FIMC1_LCLK 38 -#define CLK_MOUT_FIMC0_LCLK 39 -#define CLK_MOUT_MFC 40 -#define CLK_MOUT_MFC_1 41 -#define CLK_MOUT_MFC_0 42 -#define CLK_MOUT_G3D 43 -#define CLK_MOUT_G3D_1 44 -#define CLK_MOUT_G3D_0 45 -#define CLK_MOUT_MIPI0 46 -#define CLK_MOUT_FIMD0 47 -#define CLK_MOUT_TSADC_ISP 48 -#define CLK_MOUT_UART_ISP 49 -#define CLK_MOUT_SPI1_ISP 50 -#define CLK_MOUT_SPI0_ISP 51 -#define CLK_MOUT_PWM_ISP 52 -#define CLK_MOUT_AUDIO0 53 -#define CLK_MOUT_TSADC 54 -#define CLK_MOUT_MMC2 55 -#define CLK_MOUT_MMC1 56 -#define CLK_MOUT_MMC0 57 -#define CLK_MOUT_UART3 58 -#define CLK_MOUT_UART2 59 -#define CLK_MOUT_UART1 60 -#define CLK_MOUT_UART0 61 -#define CLK_MOUT_SPI2 62 -#define CLK_MOUT_SPI1 63 -#define CLK_MOUT_SPI0 64 -#define CLK_MOUT_SPDIF 65 -#define CLK_MOUT_AUDIO2 66 -#define CLK_MOUT_AUDIO1 67 -#define CLK_MOUT_MPLL_USER_C 68 -#define CLK_MOUT_HPM 69 -#define CLK_MOUT_CORE 70 -#define CLK_MOUT_APLL 71 -#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72 -#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73 -#define CLK_MOUT_JPEG 74 -#define CLK_MOUT_JPEG1 75 -#define CLK_MOUT_JPEG0 76 -#define CLK_MOUT_ACLK_ISP0_300 77 -#define CLK_MOUT_ACLK_ISP0_400 78 -#define CLK_MOUT_ACLK_ISP0_300_USER 79 -#define CLK_MOUT_ACLK_ISP1_300 80 -#define CLK_MOUT_ACLK_ISP1_300_USER 81 -#define CLK_MOUT_HDMI 82 - -/* Dividers */ -#define CLK_DIV_GPL 90 -#define CLK_DIV_GDL 91 -#define CLK_DIV_GPR 92 -#define CLK_DIV_GDR 93 -#define CLK_DIV_ACLK_400_MCUISP 94 -#define CLK_DIV_EBI 95 -#define CLK_DIV_ACLK_200 96 -#define CLK_DIV_ACLK_160 97 -#define CLK_DIV_ACLK_100 98 -#define CLK_DIV_ACLK_266 99 -#define CLK_DIV_CSIS1 100 -#define CLK_DIV_CSIS0 101 -#define CLK_DIV_CAM1 102 -#define CLK_DIV_FIMC3_LCLK 103 -#define CLK_DIV_FIMC2_LCLK 104 -#define CLK_DIV_FIMC1_LCLK 105 -#define CLK_DIV_FIMC0_LCLK 106 -#define CLK_DIV_TV_BLK 107 -#define CLK_DIV_MFC 108 -#define CLK_DIV_G3D 109 -#define CLK_DIV_MIPI0_PRE 110 -#define CLK_DIV_MIPI0 111 -#define CLK_DIV_FIMD0 112 -#define CLK_DIV_UART_ISP 113 -#define CLK_DIV_SPI1_ISP_PRE 114 -#define CLK_DIV_SPI1_ISP 115 -#define CLK_DIV_SPI0_ISP_PRE 116 -#define CLK_DIV_SPI0_ISP 117 -#define CLK_DIV_PWM_ISP 118 -#define CLK_DIV_PCM0 119 -#define CLK_DIV_AUDIO0 120 -#define CLK_DIV_TSADC_PRE 121 -#define CLK_DIV_TSADC 122 -#define CLK_DIV_MMC1_PRE 123 -#define CLK_DIV_MMC1 124 -#define CLK_DIV_MMC0_PRE 125 -#define CLK_DIV_MMC0 126 -#define CLK_DIV_MMC2_PRE 127 -#define CLK_DIV_MMC2 128 -#define CLK_DIV_UART3 129 -#define CLK_DIV_UART2 130 -#define CLK_DIV_UART1 131 -#define CLK_DIV_UART0 132 -#define CLK_DIV_SPI1_PRE 133 -#define CLK_DIV_SPI1 134 -#define CLK_DIV_SPI0_PRE 135 -#define CLK_DIV_SPI0 
136 -#define CLK_DIV_SPI2_PRE 137 -#define CLK_DIV_SPI2 138 -#define CLK_DIV_PCM2 139 -#define CLK_DIV_AUDIO2 140 -#define CLK_DIV_PCM1 141 -#define CLK_DIV_AUDIO1 142 -#define CLK_DIV_I2S1 143 -#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144 -#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145 -#define CLK_DIV_JPEG 146 -#define CLK_DIV_CORE2 147 -#define CLK_DIV_APLL 148 -#define CLK_DIV_PCLK_DBG 149 -#define CLK_DIV_ATB 150 -#define CLK_DIV_PERIPH 151 -#define CLK_DIV_COREM1 152 -#define CLK_DIV_COREM0 153 -#define CLK_DIV_CORE 154 -#define CLK_DIV_HPM 155 -#define CLK_DIV_COPY 156 - -/* Gates */ -#define CLK_ASYNC_G3D 180 -#define CLK_ASYNC_MFCL 181 -#define CLK_ASYNC_TVX 182 -#define CLK_PPMULEFT 183 -#define CLK_GPIO_LEFT 184 -#define CLK_PPMUIMAGE 185 -#define CLK_QEMDMA2 186 -#define CLK_QEROTATOR 187 -#define CLK_SMMUMDMA2 188 -#define CLK_SMMUROTATOR 189 -#define CLK_MDMA2 190 -#define CLK_ROTATOR 191 -#define CLK_ASYNC_ISPMX 192 -#define CLK_ASYNC_MAUDIOX 193 -#define CLK_ASYNC_MFCR 194 -#define CLK_ASYNC_FSYSD 195 -#define CLK_ASYNC_LCD0X 196 -#define CLK_ASYNC_CAMX 197 -#define CLK_PPMURIGHT 198 -#define CLK_GPIO_RIGHT 199 -#define CLK_ANTIRBK_APBIF 200 -#define CLK_EFUSE_WRITER_APBIF 201 -#define CLK_MONOCNT 202 -#define CLK_TZPC6 203 -#define CLK_PROVISIONKEY1 204 -#define CLK_PROVISIONKEY0 205 -#define CLK_CMU_ISPPART 206 -#define CLK_TMU_APBIF 207 -#define CLK_KEYIF 208 -#define CLK_RTC 209 -#define CLK_WDT 210 -#define CLK_MCT 211 -#define CLK_SECKEY 212 -#define CLK_HDMI_CEC 213 -#define CLK_TZPC5 214 -#define CLK_TZPC4 215 -#define CLK_TZPC3 216 -#define CLK_TZPC2 217 -#define CLK_TZPC1 218 -#define CLK_TZPC0 219 -#define CLK_CMU_COREPART 220 -#define CLK_CMU_TOPPART 221 -#define CLK_PMU_APBIF 222 -#define CLK_SYSREG 223 -#define CLK_CHIP_ID 224 -#define CLK_SMMUFIMC_LITE2 225 -#define CLK_FIMC_LITE2 226 -#define CLK_PIXELASYNCM1 227 -#define CLK_PIXELASYNCM0 228 -#define CLK_PPMUCAMIF 229 -#define CLK_SMMUJPEG 230 -#define CLK_SMMUFIMC3 231 -#define CLK_SMMUFIMC2 232 -#define CLK_SMMUFIMC1 233 -#define CLK_SMMUFIMC0 234 -#define CLK_JPEG 235 -#define CLK_CSIS1 236 -#define CLK_CSIS0 237 -#define CLK_FIMC3 238 -#define CLK_FIMC2 239 -#define CLK_FIMC1 240 -#define CLK_FIMC0 241 -#define CLK_PPMUTV 242 -#define CLK_SMMUTV 243 -#define CLK_HDMI 244 -#define CLK_MIXER 245 -#define CLK_VP 246 -#define CLK_PPMUMFC_R 247 -#define CLK_PPMUMFC_L 248 -#define CLK_SMMUMFC_R 249 -#define CLK_SMMUMFC_L 250 -#define CLK_MFC 251 -#define CLK_PPMUG3D 252 -#define CLK_G3D 253 -#define CLK_PPMULCD0 254 -#define CLK_SMMUFIMD0 255 -#define CLK_DSIM0 256 -#define CLK_SMIES 257 -#define CLK_MIE0 258 -#define CLK_FIMD0 259 -#define CLK_TSADC 260 -#define CLK_PPMUFILE 261 -#define CLK_NFCON 262 -#define CLK_USBDEVICE 263 -#define CLK_USBHOST 264 -#define CLK_SROMC 265 -#define CLK_SDMMC2 266 -#define CLK_SDMMC1 267 -#define CLK_SDMMC0 268 -#define CLK_PDMA1 269 -#define CLK_PDMA0 270 -#define CLK_SPDIF 271 -#define CLK_PWM 272 -#define CLK_PCM2 273 -#define CLK_PCM1 274 -#define CLK_I2S1 275 -#define CLK_SPI2 276 -#define CLK_SPI1 277 -#define CLK_SPI0 278 -#define CLK_I2CHDMI 279 -#define CLK_I2C7 280 -#define CLK_I2C6 281 -#define CLK_I2C5 282 -#define CLK_I2C4 283 -#define CLK_I2C3 284 -#define CLK_I2C2 285 -#define CLK_I2C1 286 -#define CLK_I2C0 287 -#define CLK_UART3 288 -#define CLK_UART2 289 -#define CLK_UART1 290 -#define CLK_UART0 291 - -/* Special clocks */ -#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330 -#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331 -#define CLK_SCLK_JPEG 332 -#define CLK_SCLK_CSIS1 333 -#define 
CLK_SCLK_CSIS0 334 -#define CLK_SCLK_CAM1 335 -#define CLK_SCLK_FIMC3_LCLK 336 -#define CLK_SCLK_FIMC2_LCLK 337 -#define CLK_SCLK_FIMC1_LCLK 338 -#define CLK_SCLK_FIMC0_LCLK 339 -#define CLK_SCLK_PIXEL 340 -#define CLK_SCLK_HDMI 341 -#define CLK_SCLK_MIXER 342 -#define CLK_SCLK_MFC 343 -#define CLK_SCLK_G3D 344 -#define CLK_SCLK_MIPIDPHY4L 345 -#define CLK_SCLK_MIPI0 346 -#define CLK_SCLK_MDNIE0 347 -#define CLK_SCLK_FIMD0 348 -#define CLK_SCLK_PCM0 349 -#define CLK_SCLK_AUDIO0 350 -#define CLK_SCLK_TSADC 351 -#define CLK_SCLK_EBI 352 -#define CLK_SCLK_MMC2 353 -#define CLK_SCLK_MMC1 354 -#define CLK_SCLK_MMC0 355 -#define CLK_SCLK_I2S 356 -#define CLK_SCLK_PCM2 357 -#define CLK_SCLK_PCM1 358 -#define CLK_SCLK_AUDIO2 359 -#define CLK_SCLK_AUDIO1 360 -#define CLK_SCLK_SPDIF 361 -#define CLK_SCLK_SPI2 362 -#define CLK_SCLK_SPI1 363 -#define CLK_SCLK_SPI0 364 -#define CLK_SCLK_UART3 365 -#define CLK_SCLK_UART2 366 -#define CLK_SCLK_UART1 367 -#define CLK_SCLK_UART0 368 -#define CLK_SCLK_HDMIPHY 369 - -/* - * Total number of clocks of main CMU. - * NOTE: Must be equal to last clock ID increased by one. - */ -#define CLK_NR_CLKS 370 - -/* - * CMU DMC - */ -#define CLK_DMC_FOUT_MPLL 1 -#define CLK_DMC_FOUT_BPLL 2 - -#define CLK_DMC_MOUT_MPLL 3 -#define CLK_DMC_MOUT_BPLL 4 -#define CLK_DMC_MOUT_DPHY 5 -#define CLK_DMC_MOUT_DMC_BUS 6 - -#define CLK_DMC_DIV_DMC 7 -#define CLK_DMC_DIV_DPHY 8 -#define CLK_DMC_DIV_DMC_PRE 9 -#define CLK_DMC_DIV_DMCP 10 -#define CLK_DMC_DIV_DMCD 11 -#define CLK_DMC_DIV_MPLL_PRE 12 - -/* - * Total number of clocks of CMU_DMC. - * NOTE: Must be equal to highest clock ID increased by one. - */ -#define NR_CLKS_DMC 13 - -#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */ diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index 4fa6bb2136e3..be39d23e6a32 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -771,7 +771,10 @@ #define CLK_PCLK_DECON 113 -#define DISP_NR_CLK 114 +#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114 +#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115 + +#define DISP_NR_CLK 116 /* CMU_AUD */ #define CLK_MOUT_AUD_PLL_USER 1 diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index baade6f429d0..692846c7941b 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -14,15 +14,21 @@ #define CLKID_MPLL2 15 #define CLKID_SPI 34 #define CLKID_I2C 22 +#define CLKID_SAR_ADC 23 #define CLKID_ETH 36 #define CLKID_USB0 50 #define CLKID_USB1 51 #define CLKID_USB 55 +#define CLKID_HDMI_PCLK 63 #define CLKID_USB1_DDR_BRIDGE 64 #define CLKID_USB0_DDR_BRIDGE 65 +#define CLKID_SANA 69 +#define CLKID_GCLK_VENCI_INT0 77 #define CLKID_AO_I2C 93 #define CLKID_SD_EMMC_A 94 #define CLKID_SD_EMMC_B 95 #define CLKID_SD_EMMC_C 96 +#define CLKID_SAR_ADC_CLK 97 +#define CLKID_SAR_ADC_SEL 98 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h new file mode 100644 index 000000000000..1c00b7fe296f --- /dev/null +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2016-2017 Linaro Ltd. + * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __DTS_HI3660_CLOCK_H +#define __DTS_HI3660_CLOCK_H + +/* fixed rate clocks */ +#define HI3660_CLKIN_SYS 0 +#define HI3660_CLKIN_REF 1 +#define HI3660_CLK_FLL_SRC 2 +#define HI3660_CLK_PPLL0 3 +#define HI3660_CLK_PPLL1 4 +#define HI3660_CLK_PPLL2 5 +#define HI3660_CLK_PPLL3 6 +#define HI3660_CLK_SCPLL 7 +#define HI3660_PCLK 8 +#define HI3660_CLK_UART0_DBG 9 +#define HI3660_CLK_UART6 10 +#define HI3660_OSC32K 11 +#define HI3660_OSC19M 12 +#define HI3660_CLK_480M 13 +#define HI3660_CLK_INV 14 + +/* clk in crgctrl */ +#define HI3660_FACTOR_UART3 15 +#define HI3660_CLK_FACTOR_MMC 16 +#define HI3660_CLK_GATE_I2C0 17 +#define HI3660_CLK_GATE_I2C1 18 +#define HI3660_CLK_GATE_I2C2 19 +#define HI3660_CLK_GATE_I2C6 20 +#define HI3660_CLK_DIV_SYSBUS 21 +#define HI3660_CLK_DIV_320M 22 +#define HI3660_CLK_DIV_A53 23 +#define HI3660_CLK_GATE_SPI0 24 +#define HI3660_CLK_GATE_SPI2 25 +#define HI3660_PCIEPHY_REF 26 +#define HI3660_CLK_ABB_USB 27 +#define HI3660_HCLK_GATE_SDIO0 28 +#define HI3660_HCLK_GATE_SD 29 +#define HI3660_CLK_GATE_AOMM 30 +#define HI3660_PCLK_GPIO0 31 +#define HI3660_PCLK_GPIO1 32 +#define HI3660_PCLK_GPIO2 33 +#define HI3660_PCLK_GPIO3 34 +#define HI3660_PCLK_GPIO4 35 +#define HI3660_PCLK_GPIO5 36 +#define HI3660_PCLK_GPIO6 37 +#define HI3660_PCLK_GPIO7 38 +#define HI3660_PCLK_GPIO8 39 +#define HI3660_PCLK_GPIO9 40 +#define HI3660_PCLK_GPIO10 41 +#define HI3660_PCLK_GPIO11 42 +#define HI3660_PCLK_GPIO12 43 +#define HI3660_PCLK_GPIO13 44 +#define HI3660_PCLK_GPIO14 45 +#define HI3660_PCLK_GPIO15 46 +#define HI3660_PCLK_GPIO16 47 +#define HI3660_PCLK_GPIO17 48 +#define HI3660_PCLK_GPIO18 49 +#define HI3660_PCLK_GPIO19 50 +#define HI3660_PCLK_GPIO20 51 +#define HI3660_PCLK_GPIO21 52 +#define HI3660_CLK_GATE_SPI3 53 +#define HI3660_CLK_GATE_I2C7 54 +#define HI3660_CLK_GATE_I2C3 55 +#define HI3660_CLK_GATE_SPI1 56 +#define HI3660_CLK_GATE_UART1 57 +#define HI3660_CLK_GATE_UART2 58 +#define HI3660_CLK_GATE_UART4 59 +#define HI3660_CLK_GATE_UART5 60 +#define HI3660_CLK_GATE_I2C4 61 +#define HI3660_CLK_GATE_DMAC 62 +#define HI3660_PCLK_GATE_DSS 63 +#define HI3660_ACLK_GATE_DSS 64 +#define HI3660_CLK_GATE_LDI1 65 +#define HI3660_CLK_GATE_LDI0 66 +#define HI3660_CLK_GATE_VIVOBUS 67 +#define HI3660_CLK_GATE_EDC0 68 +#define HI3660_CLK_GATE_TXDPHY0_CFG 69 +#define HI3660_CLK_GATE_TXDPHY0_REF 70 +#define HI3660_CLK_GATE_TXDPHY1_CFG 71 +#define HI3660_CLK_GATE_TXDPHY1_REF 72 +#define HI3660_ACLK_GATE_USB3OTG 73 +#define HI3660_CLK_GATE_SPI4 74 +#define HI3660_CLK_GATE_SD 75 +#define HI3660_CLK_GATE_SDIO0 76 +#define HI3660_CLK_GATE_UFS_SUBSYS 77 +#define HI3660_PCLK_GATE_DSI0 78 +#define HI3660_PCLK_GATE_DSI1 79 +#define HI3660_ACLK_GATE_PCIE 80 +#define HI3660_PCLK_GATE_PCIE_SYS 81 +#define HI3660_CLK_GATE_PCIEAUX 82 +#define HI3660_PCLK_GATE_PCIE_PHY 83 +#define HI3660_CLK_ANDGT_LDI0 84 +#define HI3660_CLK_ANDGT_LDI1 85 +#define HI3660_CLK_ANDGT_EDC0 86 +#define HI3660_CLK_GATE_UFSPHY_GT 87 +#define HI3660_CLK_ANDGT_MMC 88 +#define HI3660_CLK_ANDGT_SD 89 +#define HI3660_CLK_A53HPM_ANDGT 90 +#define HI3660_CLK_ANDGT_SDIO 91 +#define HI3660_CLK_ANDGT_UART0 92 +#define HI3660_CLK_ANDGT_UART1 93 +#define HI3660_CLK_ANDGT_UARTH 94 +#define HI3660_CLK_ANDGT_SPI 95 +#define HI3660_CLK_VIVOBUS_ANDGT 96 +#define HI3660_CLK_AOMM_ANDGT 97 +#define HI3660_CLK_320M_PLL_GT 98 +#define HI3660_AUTODIV_EMMC0BUS 99 +#define HI3660_AUTODIV_SYSBUS 100 +#define HI3660_CLK_GATE_UFSPHY_CFG 101 +#define HI3660_CLK_GATE_UFSIO_REF 102 +#define HI3660_CLK_MUX_SYSBUS 103 +#define HI3660_CLK_MUX_UART0 104 
+#define HI3660_CLK_MUX_UART1 105 +#define HI3660_CLK_MUX_UARTH 106 +#define HI3660_CLK_MUX_SPI 107 +#define HI3660_CLK_MUX_I2C 108 +#define HI3660_CLK_MUX_MMC_PLL 109 +#define HI3660_CLK_MUX_LDI1 110 +#define HI3660_CLK_MUX_LDI0 111 +#define HI3660_CLK_MUX_SD_PLL 112 +#define HI3660_CLK_MUX_SD_SYS 113 +#define HI3660_CLK_MUX_EDC0 114 +#define HI3660_CLK_MUX_SDIO_SYS 115 +#define HI3660_CLK_MUX_SDIO_PLL 116 +#define HI3660_CLK_MUX_VIVOBUS 117 +#define HI3660_CLK_MUX_A53HPM 118 +#define HI3660_CLK_MUX_320M 119 +#define HI3660_CLK_MUX_IOPERI 120 +#define HI3660_CLK_DIV_UART0 121 +#define HI3660_CLK_DIV_UART1 122 +#define HI3660_CLK_DIV_UARTH 123 +#define HI3660_CLK_DIV_MMC 124 +#define HI3660_CLK_DIV_SD 125 +#define HI3660_CLK_DIV_EDC0 126 +#define HI3660_CLK_DIV_LDI0 127 +#define HI3660_CLK_DIV_SDIO 128 +#define HI3660_CLK_DIV_LDI1 129 +#define HI3660_CLK_DIV_SPI 130 +#define HI3660_CLK_DIV_VIVOBUS 131 +#define HI3660_CLK_DIV_I2C 132 +#define HI3660_CLK_DIV_UFSPHY 133 +#define HI3660_CLK_DIV_CFGBUS 134 +#define HI3660_CLK_DIV_MMC0BUS 135 +#define HI3660_CLK_DIV_MMC1BUS 136 +#define HI3660_CLK_DIV_UFSPERI 137 +#define HI3660_CLK_DIV_AOMM 138 +#define HI3660_CLK_DIV_IOPERI 139 + +/* clk in pmuctrl */ +#define HI3660_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3660_GATE_UFS_TCXO_EN 0 +#define HI3660_GATE_USB_TCXO_EN 1 + +/* clk in sctrl */ +#define HI3660_PCLK_AO_GPIO0 0 +#define HI3660_PCLK_AO_GPIO1 1 +#define HI3660_PCLK_AO_GPIO2 2 +#define HI3660_PCLK_AO_GPIO3 3 +#define HI3660_PCLK_AO_GPIO4 4 +#define HI3660_PCLK_AO_GPIO5 5 +#define HI3660_PCLK_AO_GPIO6 6 +#define HI3660_PCLK_GATE_MMBUF 7 +#define HI3660_CLK_GATE_DSS_AXI_MM 8 +#define HI3660_PCLK_MMBUF_ANDGT 9 +#define HI3660_CLK_MMBUF_PLL_ANDGT 10 +#define HI3660_CLK_FLL_MMBUF_ANDGT 11 +#define HI3660_CLK_SYS_MMBUF_ANDGT 12 +#define HI3660_CLK_GATE_PCIEPHY_GT 13 +#define HI3660_ACLK_MUX_MMBUF 14 +#define HI3660_CLK_SW_MMBUF 15 +#define HI3660_CLK_DIV_AOBUS 16 +#define HI3660_PCLK_DIV_MMBUF 17 +#define HI3660_ACLK_DIV_MMBUF 18 +#define HI3660_CLK_DIV_PCIEPHY 19 + +/* clk in iomcu */ +#define HI3660_CLK_I2C0_IOMCU 0 +#define HI3660_CLK_I2C1_IOMCU 1 +#define HI3660_CLK_I2C2_IOMCU 2 +#define HI3660_CLK_I2C6_IOMCU 3 +#define HI3660_CLK_IOMCU_PERI0 4 + +#endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 1183347c383f..a7a1a50f33ef 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -449,5 +449,6 @@ #define IMX7D_ADC_ROOT_CLK 436 #define IMX7D_CLK_ARM 437 #define IMX7D_CKIL 438 -#define IMX7D_CLK_END 439 +#define IMX7D_OCOTP_CLK 439 +#define IMX7D_CLK_END 440 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 6240e5b0e900..7e8a7be6dcda 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,6 +81,17 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h 
b/include/dt-bindings/clock/qcom,gcc-mdm9615.h index 9ab2c4087120..787e448958bd 100644 --- a/include/dt-bindings/clock/qcom,gcc-mdm9615.h +++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h @@ -323,5 +323,7 @@ #define CE3_H_CLK 305 #define USB_HS1_SYSTEM_CLK_SRC 306 #define USB_HS1_SYSTEM_CLK 307 +#define EBI2_CLK 308 +#define EBI2_AON_CLK 309 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h index 8fa535be2ebc..df47da0860f7 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8994.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h @@ -133,5 +133,6 @@ #define GCC_USB30_MOCK_UTMI_CLK 115 #define GCC_USB3_PHY_AUX_CLK 116 #define GCC_USB_HS_SYSTEM_CLK 117 +#define GCC_SDCC1_AHB_CLK 118 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 1828723eb621..1f5c42254798 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -339,6 +339,7 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 5924cdb71336..96b63c00249e 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -14,7 +14,7 @@ #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H #define _DT_BINDINGS_CLK_MSM_RPMCC_H -/* apq8064 */ +/* RPM clocks */ #define RPM_PXO_CLK 0 #define RPM_PXO_A_CLK 1 #define RPM_CXO_CLK 2 @@ -38,7 +38,7 @@ #define RPM_SFPB_CLK 20 #define RPM_SFPB_A_CLK 21 -/* msm8916 */ +/* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 #define RPM_SMD_XO_A_CLK_SRC 1 #define RPM_SMD_PCNOC_CLK 2 @@ -65,5 +65,41 @@ #define RPM_SMD_RF_CLK1_A_PIN 23 #define RPM_SMD_RF_CLK2_PIN 24 #define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 #endif diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h index 29e01ed10e74..ce09915c298f 100644 --- a/include/dt-bindings/clock/r7s72100-clock.h +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -25,6 +25,10 @@ #define R7S72100_CLK_SCIF6 1 #define R7S72100_CLK_SCIF7 0 +/* MSTP5 */ +#define R7S72100_CLK_OSTM0 1 +#define R7S72100_CLK_OSTM1 0 + /* MSTP7 */ #define R7S72100_CLK_ETHER 4 diff --git a/include/dt-bindings/clock/rk3188-cru-common.h 
b/include/dt-bindings/clock/rk3188-cru-common.h index d141c1f0c778..eff4319d008b 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -108,6 +108,8 @@ #define PCLK_TSADC 349 #define PCLK_CPU 350 #define PCLK_PERI 351 +#define PCLK_DDRUPCTL 352 +#define PCLK_PUBL 353 /* hclk gates */ #define HCLK_SDMMC 448 diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h index 9a586e2d9c91..d7b6c83ea63f 100644 --- a/include/dt-bindings/clock/rk3288-cru.h +++ b/include/dt-bindings/clock/rk3288-cru.h @@ -88,6 +88,7 @@ #define SCLK_PVTM_GPU 124 #define SCLK_CRYPTO 125 #define SCLK_MIPIDSI_24M 126 +#define SCLK_VIP_OUT 127 #define SCLK_MAC 151 #define SCLK_MACREF_OUT 152 @@ -168,6 +169,7 @@ #define PCLK_WDT 368 #define PCLK_EFUSE256 369 #define PCLK_EFUSE1024 370 +#define PCLK_ISP_IN 371 /* hclk gates */ #define HCLK_GPS 448 diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h new file mode 100644 index 000000000000..ee702c8e4c09 --- /dev/null +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Elaine <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3328_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_NPLL 5 +#define ARMCLK 6 + +/* sclk gates (special clocks) */ +#define SCLK_RTC32K 30 +#define SCLK_SDMMC_EXT 31 +#define SCLK_SPI 32 +#define SCLK_SDMMC 33 +#define SCLK_SDIO 34 +#define SCLK_EMMC 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_UART0 38 +#define SCLK_UART1 39 +#define SCLK_UART2 40 +#define SCLK_I2S0 41 +#define SCLK_I2S1 42 +#define SCLK_I2S2 43 +#define SCLK_I2S1_OUT 44 +#define SCLK_I2S2_OUT 45 +#define SCLK_SPDIF 46 +#define SCLK_TIMER0 47 +#define SCLK_TIMER1 48 +#define SCLK_TIMER2 49 +#define SCLK_TIMER3 50 +#define SCLK_TIMER4 51 +#define SCLK_TIMER5 52 +#define SCLK_WIFI 53 +#define SCLK_CIF_OUT 54 +#define SCLK_I2C0 55 +#define SCLK_I2C1 56 +#define SCLK_I2C2 57 +#define SCLK_I2C3 58 +#define SCLK_CRYPTO 59 +#define SCLK_PWM 60 +#define SCLK_PDM 61 +#define SCLK_EFUSE 62 +#define SCLK_OTP 63 +#define SCLK_DDRCLK 64 +#define SCLK_VDEC_CABAC 65 +#define SCLK_VDEC_CORE 66 +#define SCLK_VENC_DSP 67 +#define SCLK_VENC_CORE 68 +#define SCLK_RGA 69 +#define SCLK_HDMI_SFC 70 +#define SCLK_HDMI_CEC 71 +#define SCLK_USB3_REF 72 +#define SCLK_USB3_SUSPEND 73 +#define SCLK_SDMMC_DRV 74 +#define SCLK_SDIO_DRV 75 +#define SCLK_EMMC_DRV 76 +#define SCLK_SDMMC_EXT_DRV 77 +#define SCLK_SDMMC_SAMPLE 78 +#define SCLK_SDIO_SAMPLE 79 +#define SCLK_EMMC_SAMPLE 80 +#define SCLK_SDMMC_EXT_SAMPLE 81 +#define SCLK_VOP 82 +#define SCLK_MAC2PHY_RXTX 83 +#define SCLK_MAC2PHY_SRC 84 +#define SCLK_MAC2PHY_REF 85 +#define SCLK_MAC2PHY_OUT 86 +#define SCLK_MAC2IO_RX 87 +#define SCLK_MAC2IO_TX 88 +#define SCLK_MAC2IO_REFOUT 89 +#define 
SCLK_MAC2IO_REF 90 +#define SCLK_MAC2IO_OUT 91 +#define SCLK_TSP 92 +#define SCLK_HSADC_TSP 93 +#define SCLK_USB3PHY_REF 94 +#define SCLK_REF_USB3OTG 95 +#define SCLK_USB3OTG_REF 96 +#define SCLK_USB3OTG_SUSPEND 97 +#define SCLK_REF_USB3OTG_SRC 98 +#define SCLK_MAC2IO_SRC 99 +#define SCLK_MAC2IO 100 +#define SCLK_MAC2PHY 101 + +/* dclk gates */ +#define DCLK_LCDC 120 +#define DCLK_HDMIPHY 121 +#define HDMIPHY 122 +#define USB480M 123 +#define DCLK_LCDC_SRC 124 + +/* aclk gates */ +#define ACLK_AXISRAM 130 +#define ACLK_VOP_PRE 131 +#define ACLK_USB3OTG 132 +#define ACLK_RGA_PRE 133 +#define ACLK_DMAC 134 +#define ACLK_GPU 135 +#define ACLK_BUS_PRE 136 +#define ACLK_PERI_PRE 137 +#define ACLK_RKVDEC_PRE 138 +#define ACLK_RKVDEC 139 +#define ACLK_RKVENC 140 +#define ACLK_VPU_PRE 141 +#define ACLK_VIO_PRE 142 +#define ACLK_VPU 143 +#define ACLK_VIO 144 +#define ACLK_VOP 145 +#define ACLK_GMAC 146 +#define ACLK_H265 147 +#define ACLK_H264 148 +#define ACLK_MAC2PHY 149 +#define ACLK_MAC2IO 150 +#define ACLK_DCF 151 +#define ACLK_TSP 152 +#define ACLK_PERI 153 +#define ACLK_RGA 154 +#define ACLK_IEP 155 +#define ACLK_CIF 156 +#define ACLK_HDCP 157 + +/* pclk gates */ +#define PCLK_GPIO0 200 +#define PCLK_GPIO1 201 +#define PCLK_GPIO2 202 +#define PCLK_GPIO3 203 +#define PCLK_GRF 204 +#define PCLK_I2C0 205 +#define PCLK_I2C1 206 +#define PCLK_I2C2 207 +#define PCLK_I2C3 208 +#define PCLK_SPI 209 +#define PCLK_UART0 210 +#define PCLK_UART1 211 +#define PCLK_UART2 212 +#define PCLK_TSADC 213 +#define PCLK_PWM 214 +#define PCLK_TIMER 215 +#define PCLK_BUS_PRE 216 +#define PCLK_PERI_PRE 217 +#define PCLK_HDMI_CTRL 218 +#define PCLK_HDMI_PHY 219 +#define PCLK_GMAC 220 +#define PCLK_H265 221 +#define PCLK_MAC2PHY 222 +#define PCLK_MAC2IO 223 +#define PCLK_USB3PHY_OTG 224 +#define PCLK_USB3PHY_PIPE 225 +#define PCLK_USB3_GRF 226 +#define PCLK_USB2_GRF 227 +#define PCLK_HDMIPHY 228 +#define PCLK_DDR 229 +#define PCLK_PERI 230 +#define PCLK_HDMI 231 +#define PCLK_HDCP 232 +#define PCLK_DCF 233 +#define PCLK_SARADC 234 + +/* hclk gates */ +#define HCLK_PERI 308 +#define HCLK_TSP 309 +#define HCLK_GMAC 310 +#define HCLK_I2S0_8CH 311 +#define HCLK_I2S1_8CH 312 +#define HCLK_I2S2_2CH 313 +#define HCLK_SPDIF_8CH 314 +#define HCLK_VOP 315 +#define HCLK_NANDC 316 +#define HCLK_SDMMC 317 +#define HCLK_SDIO 318 +#define HCLK_EMMC 319 +#define HCLK_SDMMC_EXT 320 +#define HCLK_RKVDEC_PRE 321 +#define HCLK_RKVDEC 322 +#define HCLK_RKVENC 323 +#define HCLK_VPU_PRE 324 +#define HCLK_VIO_PRE 325 +#define HCLK_VPU 326 +#define HCLK_VIO 327 +#define HCLK_BUS_PRE 328 +#define HCLK_PERI_PRE 329 +#define HCLK_H264 330 +#define HCLK_CIF 331 +#define HCLK_OTG_PMU 332 +#define HCLK_OTG 333 +#define HCLK_HOST0 334 +#define HCLK_HOST0_ARB 335 +#define HCLK_CRYPTO_MST 336 +#define HCLK_CRYPTO_SLV 337 +#define HCLK_PDM 338 +#define HCLK_IEP 339 +#define HCLK_RGA 340 +#define HCLK_HDCP 341 + +#define CLK_NR_CLKS (HCLK_HDCP + 1) + +/* soft-reset indices */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NIU 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +#define SRST_A53_GIC 18 +#define SRST_DAP 19 +#define SRST_PMU_P 21 +#define SRST_EFUSE 22 +#define SRST_BUSSYS_H 23 +#define SRST_BUSSYS_P 24 +#define SRST_SPDIF 25 +#define SRST_INTMEM 26
+#define SRST_ROM 27 +#define SRST_GPIO0 28 +#define SRST_GPIO1 29 +#define SRST_GPIO2 30 +#define SRST_GPIO3 31 + +#define SRST_I2S0 32 +#define SRST_I2S1 33 +#define SRST_I2S2 34 +#define SRST_I2S0_H 35 +#define SRST_I2S1_H 36 +#define SRST_I2S2_H 37 +#define SRST_UART0 38 +#define SRST_UART1 39 +#define SRST_UART2 40 +#define SRST_UART0_P 41 +#define SRST_UART1_P 42 +#define SRST_UART2_P 43 +#define SRST_I2C0 44 +#define SRST_I2C1 45 +#define SRST_I2C2 46 +#define SRST_I2C3 47 + +#define SRST_I2C0_P 48 +#define SRST_I2C1_P 49 +#define SRST_I2C2_P 50 +#define SRST_I2C3_P 51 +#define SRST_EFUSE_SE_P 52 +#define SRST_EFUSE_NS_P 53 +#define SRST_PWM0 54 +#define SRST_PWM0_P 55 +#define SRST_DMA 56 +#define SRST_TSP_A 57 +#define SRST_TSP_H 58 +#define SRST_TSP 59 +#define SRST_TSP_HSADC 60 +#define SRST_DCF_A 61 +#define SRST_DCF_P 62 + +#define SRST_SCR 64 +#define SRST_SPI 65 +#define SRST_TSADC 66 +#define SRST_TSADC_P 67 +#define SRST_CRYPTO 68 +#define SRST_SGRF 69 +#define SRST_GRF 70 +#define SRST_USB_GRF 71 +#define SRST_TIMER_6CH_P 72 +#define SRST_TIMER0 73 +#define SRST_TIMER1 74 +#define SRST_TIMER2 75 +#define SRST_TIMER3 76 +#define SRST_TIMER4 77 +#define SRST_TIMER5 78 +#define SRST_USB3GRF 79 + +#define SRST_PHYNIU 80 +#define SRST_HDMIPHY 81 +#define SRST_VDAC 82 +#define SRST_ACODEC_p 83 +#define SRST_SARADC 85 +#define SRST_SARADC_P 86 +#define SRST_GRF_DDR 87 +#define SRST_DFIMON 88 +#define SRST_MSCH 89 +#define SRST_DDRMSCH 91 +#define SRST_DDRCTRL 92 +#define SRST_DDRCTRL_P 93 +#define SRST_DDRPHY 94 +#define SRST_DDRPHY_P 95 + +#define SRST_GMAC_NIU_A 96 +#define SRST_GMAC_NIU_P 97 +#define SRST_GMAC2PHY_A 98 +#define SRST_GMAC2IO_A 99 +#define SRST_MACPHY 100 +#define SRST_OTP_PHY 101 +#define SRST_GPU_A 102 +#define SRST_GPU_NIU_A 103 +#define SRST_SDMMCEXT 104 +#define SRST_PERIPH_NIU_A 105 +#define SRST_PERIHP_NIU_H 106 +#define SRST_PERIHP_P 107 +#define SRST_PERIPHSYS_H 108 +#define SRST_MMC0 109 +#define SRST_SDIO 110 +#define SRST_EMMC 111 + +#define SRST_USB2OTG_H 112 +#define SRST_USB2OTG 113 +#define SRST_USB2OTG_ADP 114 +#define SRST_USB2HOST_H 115 +#define SRST_USB2HOST_ARB 116 +#define SRST_USB2HOST_AUX 117 +#define SRST_USB2HOST_EHCIPHY 118 +#define SRST_USB2HOST_UTMI 119 +#define SRST_USB3OTG 120 +#define SRST_USBPOR 121 +#define SRST_USB2OTG_UTMI 122 +#define SRST_USB2HOST_PHY_UTMI 123 +#define SRST_USB3OTG_UTMI 124 +#define SRST_USB3PHY_U2 125 +#define SRST_USB3PHY_U3 126 +#define SRST_USB3PHY_PIPE 127 + +#define SRST_VIO_A 128 +#define SRST_VIO_BUS_H 129 +#define SRST_VIO_H2P_H 130 +#define SRST_VIO_ARBI_H 131 +#define SRST_VOP_NIU_A 132 +#define SRST_VOP_A 133 +#define SRST_VOP_H 134 +#define SRST_VOP_D 135 +#define SRST_RGA 136 +#define SRST_RGA_NIU_A 137 +#define SRST_RGA_A 138 +#define SRST_RGA_H 139 +#define SRST_IEP_A 140 +#define SRST_IEP_H 141 +#define SRST_HDMI 142 +#define SRST_HDMI_P 143 + +#define SRST_HDCP_A 144 +#define SRST_HDCP 145 +#define SRST_HDCP_H 146 +#define SRST_CIF_A 147 +#define SRST_CIF_H 148 +#define SRST_CIF_P 149 +#define SRST_OTP_P 150 +#define SRST_OTP_SBPI 151 +#define SRST_OTP_USER 152 +#define SRST_DDRCTRL_A 153 +#define SRST_DDRSTDY_P 154 +#define SRST_DDRSTDY 155 +#define SRST_PDM_H 156 +#define SRST_PDM 157 +#define SRST_USB3PHY_OTG_P 158 +#define SRST_USB3PHY_PIPE_P 159 + +#define SRST_VCODEC_A 160 +#define SRST_VCODEC_NIU_A 161 +#define SRST_VCODEC_H 162 +#define SRST_VCODEC_NIU_H 163 +#define SRST_VDEC_A 164 +#define SRST_VDEC_NIU_A 165 +#define SRST_VDEC_H 166 +#define SRST_VDEC_NIU_H 167 +#define 
SRST_VDEC_CORE 168 +#define SRST_VDEC_CABAC 169 +#define SRST_DDRPHYDIV 175 + +#define SRST_RKVENC_NIU_A 176 +#define SRST_RKVENC_NIU_H 177 +#define SRST_RKVENC_H265_A 178 +#define SRST_RKVENC_H265_P 179 +#define SRST_RKVENC_H265_CORE 180 +#define SRST_RKVENC_H265_DSP 181 +#define SRST_RKVENC_H264_A 182 +#define SRST_RKVENC_H264_H 183 +#define SRST_RKVENC_INTMEM 184 + +#endif diff --git a/include/dt-bindings/clock/ste-ab8500.h b/include/dt-bindings/clock/ste-ab8500.h new file mode 100644 index 000000000000..6731f1f00a84 --- /dev/null +++ b/include/dt-bindings/clock/ste-ab8500.h @@ -0,0 +1,11 @@ +#ifndef __STE_CLK_AB8500_H__ +#define __STE_CLK_AB8500_H__ + +#define AB8500_SYSCLK_BUF2 0 +#define AB8500_SYSCLK_BUF3 1 +#define AB8500_SYSCLK_BUF4 2 +#define AB8500_SYSCLK_ULP 3 +#define AB8500_SYSCLK_INT 4 +#define AB8500_SYSCLK_AUDIO 5 + +#endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h new file mode 100644 index 000000000000..49bb3c203e5c --- /dev/null +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -0,0 +1,59 @@ +/* + * stm32fx-clock.h + * + * Copyright (C) 2016 STMicroelectronics + * Author: Gabriel Fernandez for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +/* + * List of clocks which are not derived from system clock (SYSCLOCK) + * + * The index of these clocks is the secondary index of DT bindings + * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt) + * + * e.g: + <assigned-clocks = <&rcc 1 CLK_LSE>; +*/ + +#ifndef _DT_BINDINGS_CLK_STMFX_H +#define _DT_BINDINGS_CLK_STMFX_H + +#define SYSTICK 0 +#define FCLK 1 +#define CLK_LSI 2 +#define CLK_LSE 3 +#define CLK_HSE_RTC 4 +#define CLK_RTC 5 +#define PLL_VCO_I2S 6 +#define PLL_VCO_SAI 7 +#define CLK_LCD 8 +#define CLK_I2S 9 +#define CLK_SAI1 10 +#define CLK_SAI2 11 +#define CLK_I2SQ_PDIV 12 +#define CLK_SAIQ_PDIV 13 + +#define END_PRIMARY_CLK 14 + +#define CLK_HSI 14 +#define CLK_SYSCLK 15 +#define CLK_HDMI_CEC 16 +#define CLK_SPDIF 17 +#define CLK_USART1 18 +#define CLK_USART2 19 +#define CLK_USART3 20 +#define CLK_UART4 21 +#define CLK_UART5 22 +#define CLK_USART6 23 +#define CLK_UART7 24 +#define CLK_UART8 25 +#define CLK_I2C1 26 +#define CLK_I2C2 27 +#define CLK_I2C3 28 +#define CLK_I2C4 29 +#define CLK_LPTIMER 30 + +#define END_PRIMARY_CLK_F7 31 + +#endif
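The stm32fx-clock.h comment above describes a two-cell clock specifier: the first cell selects the bank and the second cell is one of the secondary indices listed in the header (CLK_LSE, CLK_HSI, and so on). As a rough illustration of how a provider can decode such a specifier, here is a minimal sketch; my_rcc_xlate, my_gate_hws, my_aux_hws and MY_NGATES are illustrative assumptions, not the actual clk-stm32f4 implementation:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>

#define MY_NGATES 192					/* assumed size of bank 0 */

static struct clk_hw *my_gate_hws[MY_NGATES];		/* primary cell 0 */
static struct clk_hw *my_aux_hws[END_PRIMARY_CLK_F7];	/* primary cell 1 */

static struct clk_hw *my_rcc_xlate(struct of_phandle_args *clkspec, void *data)
{
	u32 bank = clkspec->args[0];	/* which table to index */
	u32 idx = clkspec->args[1];	/* e.g. CLK_LSE from this header */

	if (bank == 1 && idx < END_PRIMARY_CLK_F7)
		return my_aux_hws[idx];
	if (bank == 0 && idx < MY_NGATES)
		return my_gate_hws[idx];
	return ERR_PTR(-EINVAL);
}

/* wired up once at probe time: of_clk_add_hw_provider(np, my_rcc_xlate, NULL); */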
diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h new file mode 100644 index 000000000000..aeb2e2f781fb --- /dev/null +++ b/include/dt-bindings/clock/sun5i-ccu.h @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_SUN5I_H_ +#define _DT_BINDINGS_CLK_SUN5I_H_ + +#define CLK_HOSC 1 + +#define CLK_CPU 17 + +#define CLK_AHB_OTG 23 +#define CLK_AHB_EHCI 24 +#define CLK_AHB_OHCI 25 +#define CLK_AHB_SS 26 +#define CLK_AHB_DMA 27 +#define CLK_AHB_BIST 28 +#define CLK_AHB_MMC0 29 +#define CLK_AHB_MMC1 30 +#define CLK_AHB_MMC2 31 +#define CLK_AHB_NAND 32 +#define CLK_AHB_SDRAM 33 +#define CLK_AHB_EMAC 34 +#define CLK_AHB_TS 35 +#define CLK_AHB_SPI0 36 +#define CLK_AHB_SPI1 37 +#define CLK_AHB_SPI2 38 +#define CLK_AHB_GPS 39 +#define CLK_AHB_HSTIMER 40 +#define CLK_AHB_VE 41 +#define CLK_AHB_TVE 42 +#define CLK_AHB_LCD 43 +#define CLK_AHB_CSI 44 +#define CLK_AHB_HDMI 45 +#define CLK_AHB_DE_BE 46 +#define CLK_AHB_DE_FE 47 +#define CLK_AHB_IEP 48 +#define CLK_AHB_GPU 49 +#define CLK_APB0_CODEC 50 +#define CLK_APB0_SPDIF 51 +#define CLK_APB0_I2S 52 +#define CLK_APB0_PIO 53 +#define CLK_APB0_IR 54 +#define CLK_APB0_KEYPAD 55 +#define CLK_APB1_I2C0 56 +#define CLK_APB1_I2C1 57 +#define CLK_APB1_I2C2 58 +#define CLK_APB1_UART0 59 +#define CLK_APB1_UART1 60 +#define CLK_APB1_UART2 61 +#define CLK_APB1_UART3 62 +#define CLK_NAND 63 +#define CLK_MMC0 64 +#define CLK_MMC1 65 +#define CLK_MMC2 66 +#define CLK_TS 67 +#define CLK_SS 68 +#define CLK_SPI0 69 +#define CLK_SPI1 70 +#define CLK_SPI2 71 +#define CLK_IR 72 +#define CLK_I2S 73 +#define CLK_SPDIF 74 +#define CLK_KEYPAD 75 +#define CLK_USB_OHCI 76 +#define CLK_USB_PHY0 77 +#define CLK_USB_PHY1 78 +#define CLK_GPS 79 +#define CLK_DRAM_VE 80 +#define CLK_DRAM_CSI 81 +#define CLK_DRAM_TS 82 +#define CLK_DRAM_TVE 83 +#define CLK_DRAM_DE_FE 84 +#define CLK_DRAM_DE_BE 85 +#define CLK_DRAM_ACE 86 +#define CLK_DRAM_IEP 87 +#define CLK_DE_BE 88 +#define CLK_DE_FE 89 +#define CLK_TCON_CH0 90 + +#define CLK_TCON_CH1 92 +#define CLK_CSI 93 +#define CLK_VE 94 +#define CLK_CODEC 95 +#define CLK_AVS 96 +#define CLK_HDMI 97 +#define CLK_GPU 98 + +#define CLK_IEP 100 + +#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..c0d5d5599c87 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is: + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_V3S_H_ +#define _DT_BINDINGS_CLK_SUN8I_V3S_H_ + +#define CLK_CPU 14 + +#define CLK_BUS_CE 20 +#define CLK_BUS_DMA 21 +#define CLK_BUS_MMC0 22 +#define CLK_BUS_MMC1 23 +#define CLK_BUS_MMC2 24 +#define CLK_BUS_DRAM 25 +#define CLK_BUS_EMAC 26 +#define CLK_BUS_HSTIMER 27 +#define CLK_BUS_SPI0 28 +#define CLK_BUS_OTG 29 +#define CLK_BUS_EHCI0 30 +#define CLK_BUS_OHCI0 31 +#define CLK_BUS_VE 32 +#define CLK_BUS_TCON0 33 +#define CLK_BUS_CSI 34 +#define CLK_BUS_DE 35 +#define CLK_BUS_CODEC 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_I2C0 38 +#define CLK_BUS_I2C1 39 +#define CLK_BUS_UART0 40 +#define CLK_BUS_UART1 41 +#define CLK_BUS_UART2 42 +#define CLK_BUS_EPHY 43 +#define CLK_BUS_DBG 44 + +#define CLK_MMC0 45 +#define CLK_MMC0_SAMPLE 46 +#define CLK_MMC0_OUTPUT 47 +#define CLK_MMC1 48 +#define CLK_MMC1_SAMPLE 49 +#define CLK_MMC1_OUTPUT 50 +#define CLK_MMC2 51 +#define CLK_MMC2_SAMPLE 52 +#define CLK_MMC2_OUTPUT 53 +#define CLK_CE 54 +#define CLK_SPI0 55 +#define CLK_USB_PHY0 56 +#define CLK_USB_OHCI0 57 + +#define CLK_DRAM_VE 59 +#define CLK_DRAM_CSI 60 +#define CLK_DRAM_EHCI 61 +#define CLK_DRAM_OHCI 62 +#define CLK_DE 63 +#define CLK_TCON0 64 +#define CLK_CSI_MISC 65 +#define CLK_CSI0_MCLK 66 +#define CLK_CSI1_SCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_VE 69 +#define CLK_AC_DIG 70 +#define CLK_AVS 71 + +#define CLK_MIPI_CSI 73 + +#endif /* _DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-ccu.h b/include/dt-bindings/clock/sun9i-a80-ccu.h new file mode 100644 index 000000000000..6ea1492a73a6 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-ccu.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ + +#define CLK_PLL_AUDIO 2 +#define CLK_PLL_PERIPH0 3 + +#define CLK_C0CPUX 12 +#define CLK_C1CPUX 13 + +#define CLK_OUT_A 27 +#define CLK_OUT_B 28 + +#define CLK_NAND0_0 29 +#define CLK_NAND0_1 30 +#define CLK_NAND1_0 31 +#define CLK_NAND1_1 32 +#define CLK_MMC0 33 +#define CLK_MMC0_SAMPLE 34 +#define CLK_MMC0_OUTPUT 35 +#define CLK_MMC1 36 +#define CLK_MMC1_SAMPLE 37 +#define CLK_MMC1_OUTPUT 38 +#define CLK_MMC2 39 +#define CLK_MMC2_SAMPLE 40 +#define CLK_MMC2_OUTPUT 41 +#define CLK_MMC3 42 +#define CLK_MMC3_SAMPLE 43 +#define CLK_MMC3_OUTPUT 44 +#define CLK_TS 45 +#define CLK_SS 46 +#define CLK_SPI0 47 +#define CLK_SPI1 48 +#define CLK_SPI2 49 +#define CLK_SPI3 50 +#define CLK_I2S0 51 +#define CLK_I2S1 52 +#define CLK_SPDIF 53 +#define CLK_SDRAM 54 +#define CLK_DE 55 +#define CLK_EDP 56 +#define CLK_MP 57 +#define CLK_LCD0 58 +#define CLK_LCD1 59 +#define CLK_MIPI_DSI0 60 +#define CLK_MIPI_DSI1 61 +#define CLK_HDMI 62 +#define CLK_HDMI_SLOW 63 +#define CLK_MIPI_CSI 64 +#define CLK_CSI_ISP 65 +#define CLK_CSI_MISC 66 +#define CLK_CSI0_MCLK 67 +#define CLK_CSI1_MCLK 68 +#define CLK_FD 69 +#define CLK_VE 70 +#define CLK_AVS 71 +#define CLK_GPU_CORE 72 +#define CLK_GPU_MEMORY 73 +#define CLK_GPU_AXI 74 +#define CLK_SATA 75 +#define CLK_AC97 76 +#define CLK_MIPI_HSI 77 +#define CLK_GPADC 78 +#define CLK_CIR_TX 79 + +#define CLK_BUS_FD 80 +#define CLK_BUS_VE 81 +#define CLK_BUS_GPU_CTRL 82 +#define CLK_BUS_SS 83 +#define CLK_BUS_MMC 84 +#define CLK_BUS_NAND0 85 +#define CLK_BUS_NAND1 86 +#define CLK_BUS_SDRAM 87 +#define CLK_BUS_MIPI_HSI 88 +#define CLK_BUS_SATA 89 +#define CLK_BUS_TS 90 +#define CLK_BUS_SPI0 91 +#define CLK_BUS_SPI1 92 +#define CLK_BUS_SPI2 93 +#define CLK_BUS_SPI3 94 + +#define CLK_BUS_OTG 95 +#define CLK_BUS_USB 96 +#define CLK_BUS_GMAC 97 +#define CLK_BUS_MSGBOX 98 +#define CLK_BUS_SPINLOCK 99 +#define CLK_BUS_HSTIMER 100 +#define CLK_BUS_DMA 101 + +#define CLK_BUS_LCD0 102 +#define CLK_BUS_LCD1 103 +#define CLK_BUS_EDP 104 +#define CLK_BUS_CSI 105 +#define CLK_BUS_HDMI 106 +#define CLK_BUS_DE 107 +#define CLK_BUS_MP 108 +#define CLK_BUS_MIPI_DSI 109 + +#define CLK_BUS_SPDIF 110 +#define CLK_BUS_PIO 111 +#define CLK_BUS_AC97 112 +#define CLK_BUS_I2S0 113 +#define CLK_BUS_I2S1 114 +#define CLK_BUS_LRADC 115 
+#define CLK_BUS_GPADC 116 +#define CLK_BUS_TWD 117 +#define CLK_BUS_CIR_TX 118 + +#define CLK_BUS_I2C0 119 +#define CLK_BUS_I2C1 120 +#define CLK_BUS_I2C2 121 +#define CLK_BUS_I2C3 122 +#define CLK_BUS_I2C4 123 +#define CLK_BUS_UART0 124 +#define CLK_BUS_UART1 125 +#define CLK_BUS_UART2 126 +#define CLK_BUS_UART3 127 +#define CLK_BUS_UART4 128 +#define CLK_BUS_UART5 129 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-de.h b/include/dt-bindings/clock/sun9i-a80-de.h new file mode 100644 index 000000000000..3dad6c3cd131 --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-de.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ + +#define CLK_FE0 0 +#define CLK_FE1 1 +#define CLK_FE2 2 +#define CLK_IEP_DEU0 3 +#define CLK_IEP_DEU1 4 +#define CLK_BE0 5 +#define CLK_BE1 6 +#define CLK_BE2 7 +#define CLK_IEP_DRC0 8 +#define CLK_IEP_DRC1 9 +#define CLK_MERGE 10 + +#define CLK_DRAM_FE0 11 +#define CLK_DRAM_FE1 12 +#define CLK_DRAM_FE2 13 +#define CLK_DRAM_DEU0 14 +#define CLK_DRAM_DEU1 15 +#define CLK_DRAM_BE0 16 +#define CLK_DRAM_BE1 17 +#define CLK_DRAM_BE2 18 +#define CLK_DRAM_DRC0 19 +#define CLK_DRAM_DRC1 20 + +#define CLK_BUS_FE0 21 +#define CLK_BUS_FE1 22 +#define CLK_BUS_FE2 23 +#define CLK_BUS_DEU0 24 +#define CLK_BUS_DEU1 25 +#define CLK_BUS_BE0 26 +#define CLK_BUS_BE1 27 +#define CLK_BUS_BE2 28 +#define CLK_BUS_DRC0 29 +#define CLK_BUS_DRC1 30 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/clock/sun9i-a80-usb.h b/include/dt-bindings/clock/sun9i-a80-usb.h new file mode 100644 index 000000000000..783a60d2ccea --- /dev/null +++ b/include/dt-bindings/clock/sun9i-a80-usb.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ + +#define CLK_BUS_HCI0 0 +#define CLK_USB_OHCI0 1 +#define CLK_BUS_HCI1 2 +#define CLK_BUS_HCI2 3 +#define CLK_USB_OHCI2 4 + +#define CLK_USB0_PHY 5 +#define CLK_USB1_HSIC 6 +#define CLK_USB1_PHY 7 +#define CLK_USB2_HSIC 8 +#define CLK_USB2_PHY 9 +#define CLK_USB_HSIC 10 + +#endif /* _DT_BINDINGS_CLOCK_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h index e98942dc0d44..082a81c94298 100644 --- a/include/dt-bindings/mfd/stm32f4-rcc.h +++ b/include/dt-bindings/mfd/stm32f4-rcc.h @@ -18,14 +18,20 @@ #define STM32F4_RCC_AHB1_GPIOJ 9 #define STM32F4_RCC_AHB1_GPIOK 10 #define STM32F4_RCC_AHB1_CRC 12 +#define STM32F4_RCC_AHB1_BKPSRAM 18 +#define STM32F4_RCC_AHB1_CCMDATARAM 20 #define STM32F4_RCC_AHB1_DMA1 21 #define STM32F4_RCC_AHB1_DMA2 22 #define STM32F4_RCC_AHB1_DMA2D 23 #define STM32F4_RCC_AHB1_ETHMAC 25 -#define STM32F4_RCC_AHB1_OTGHS 29 +#define STM32F4_RCC_AHB1_ETHMACTX 26 +#define STM32F4_RCC_AHB1_ETHMACRX 27 +#define STM32F4_RCC_AHB1_ETHMACPTP 28 +#define STM32F4_RCC_AHB1_OTGHS 29 +#define STM32F4_RCC_AHB1_OTGHSULPI 30 #define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8)) -#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit + (0x30 * 8)) +#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit) /* AHB2 */ @@ -36,13 +42,14 @@ #define STM32F4_RCC_AHB2_OTGFS 7 #define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8)) -#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + (0x34 * 8)) +#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + 0x20) /* AHB3 */ #define STM32F4_RCC_AHB3_FMC 0 +#define STM32F4_RCC_AHB3_QSPI 1 #define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8)) -#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + (0x38 * 8)) +#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + 0x40) /* APB1 */ #define STM32F4_RCC_APB1_TIM2 0 @@ -72,14 +79,16 @@ #define STM32F4_RCC_APB1_UART8 31 #define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8)) -#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + (0x40 * 8)) +#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + 0x80) /* APB2 */ #define STM32F4_RCC_APB2_TIM1 0 #define STM32F4_RCC_APB2_TIM8 1 #define STM32F4_RCC_APB2_USART1 4 #define STM32F4_RCC_APB2_USART6 5 -#define STM32F4_RCC_APB2_ADC 8 +#define STM32F4_RCC_APB2_ADC1 8 +#define STM32F4_RCC_APB2_ADC2 9 +#define STM32F4_RCC_APB2_ADC3 10 #define STM32F4_RCC_APB2_SDIO 11 #define STM32F4_RCC_APB2_SPI1 12 #define STM32F4_RCC_APB2_SPI4 13 @@ -91,8 +100,9 @@ #define STM32F4_RCC_APB2_SPI6 21 #define STM32F4_RCC_APB2_SAI1 22 #define STM32F4_RCC_APB2_LTDC 26 +#define STM32F4_RCC_APB2_DSI 27 #define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8)) -#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + (0x44 * 8)) +#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + 0xA0) #endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */ diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index effadd05695b..fbd6f7202476 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -45,8 +45,8 @@ #define PIN_OFF_NONE 0 #define PIN_OFF_OUTPUT_HIGH (OFF_EN | OFFOUT_EN | OFFOUT_VAL) #define PIN_OFF_OUTPUT_LOW (OFF_EN | OFFOUT_EN) -#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFF_PULL_EN | OFF_PULL_UP) -#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFF_PULL_EN) +#define PIN_OFF_INPUT_PULLUP (OFF_EN | OFFOUT_EN 
| OFF_PULL_EN | OFF_PULL_UP) +#define PIN_OFF_INPUT_PULLDOWN (OFF_EN | OFFOUT_EN | OFF_PULL_EN) #define PIN_OFF_WAKEUPENABLE WAKEUP_EN /* diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h index 6276eb785e2b..b7aa3646208b 100644 --- a/include/dt-bindings/pinctrl/samsung.h +++ b/include/dt-bindings/pinctrl/samsung.h @@ -45,6 +45,20 @@ #define EXYNOS5420_PIN_DRV_LV3 2 #define EXYNOS5420_PIN_DRV_LV4 3 +/* Drive strengths for Exynos5433 */ +#define EXYNOS5433_PIN_DRV_FAST_SR1 0 +#define EXYNOS5433_PIN_DRV_FAST_SR2 1 +#define EXYNOS5433_PIN_DRV_FAST_SR3 2 +#define EXYNOS5433_PIN_DRV_FAST_SR4 3 +#define EXYNOS5433_PIN_DRV_FAST_SR5 4 +#define EXYNOS5433_PIN_DRV_FAST_SR6 5 +#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 +#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 +#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa +#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb +#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc +#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf + #define EXYNOS_PIN_FUNC_INPUT 0 #define EXYNOS_PIN_FUNC_OUTPUT 1 #define EXYNOS_PIN_FUNC_2 2 @@ -54,4 +68,12 @@ #define EXYNOS_PIN_FUNC_6 6 #define EXYNOS_PIN_FUNC_F 0xf +/* Drive strengths for Exynos7 FSYS1 block */ +#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 +#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 +#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 +#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 +#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 +#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 + #endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/power/rk3328-power.h b/include/dt-bindings/power/rk3328-power.h new file mode 100644 index 000000000000..10c3c3715334 --- /dev/null +++ b/include/dt-bindings/power/rk3328-power.h @@ -0,0 +1,18 @@ +#ifndef __DT_BINDINGS_POWER_RK3328_POWER_H__ +#define __DT_BINDINGS_POWER_RK3328_POWER_H__ + +/** + * RK3328 idle id Summary. + */ +#define RK3328_PD_CORE 0 +#define RK3328_PD_GPU 1 +#define RK3328_PD_BUS 2 +#define RK3328_PD_MSCH 3 +#define RK3328_PD_PERI 4 +#define RK3328_PD_VIDEO 5 +#define RK3328_PD_HEVC 6 +#define RK3328_PD_SYS 7 +#define RK3328_PD_VPU 8 +#define RK3328_PD_VIO 9 + +#endif diff --git a/include/dt-bindings/reset/sun5i-ccu.h b/include/dt-bindings/reset/sun5i-ccu.h new file mode 100644 index 000000000000..c2b9726b5026 --- /dev/null +++ b/include/dt-bindings/reset/sun5i-ccu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _RST_SUN5I_H_ +#define _RST_SUN5I_H_ + +#define RST_USB_PHY0 0 +#define RST_USB_PHY1 1 +#define RST_GPS 2 +#define RST_DE_BE 3 +#define RST_DE_FE 4 +#define RST_TVE 5 +#define RST_LCD 6 +#define RST_CSI 7 +#define RST_VE 8 +#define RST_GPU 9 +#define RST_IEP 10 + +#endif /* _RST_SUN5I_H_ */
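These reset indices are what a board's DTS hands to the CCU through a "resets" specifier; on the provider side the driver translates each index back into a register offset and bit. A minimal sketch of such a translation table, in the spirit of the sunxi-ng reset maps (the structure name and the register offsets here are assumptions for illustration, not the actual sun5i driver tables):

#include <linux/bitops.h>
#include <linux/types.h>

/* illustrative only: one entry per reset index defined above */
struct my_ccu_reset_map {
	u16 reg;	/* register offset within the CCU */
	u32 bit;	/* bit that asserts/deasserts the line */
};

static const struct my_ccu_reset_map my_sun5i_resets[] = {
	[RST_USB_PHY0]	= { 0x0cc, BIT(0) },	/* assumed offsets */
	[RST_USB_PHY1]	= { 0x0cc, BIT(1) },
	[RST_DE_BE]	= { 0x104, BIT(30) },
};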
diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h new file mode 100644 index 000000000000..b58ef21a2e18 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz> + * + * Based on sun8i-h3-ccu.h, which is + * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_V3S_H_ +#define _DT_BINDINGS_RST_SUN8I_V3S_H_ + +#define RST_USB_PHY0 0 + +#define RST_MBUS 1 + +#define RST_BUS_CE 5 +#define RST_BUS_DMA 6 +#define RST_BUS_MMC0 7 +#define RST_BUS_MMC1 8 +#define RST_BUS_MMC2 9 +#define RST_BUS_DRAM 11 +#define RST_BUS_EMAC 12 +#define RST_BUS_HSTIMER 14 +#define RST_BUS_SPI0 15 +#define RST_BUS_OTG 17 +#define RST_BUS_EHCI0 18 +#define RST_BUS_OHCI0 22 +#define RST_BUS_VE 26 +#define RST_BUS_TCON0 27 +#define RST_BUS_CSI 30 +#define RST_BUS_DE 34 +#define RST_BUS_DBG 38 +#define RST_BUS_EPHY 39 +#define RST_BUS_CODEC 40 +#define RST_BUS_I2C0 46 +#define RST_BUS_I2C1 47 +#define RST_BUS_UART0 49 +#define RST_BUS_UART1 50 +#define RST_BUS_UART2 51 + +#endif /* _DT_BINDINGS_RST_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-ccu.h b/include/dt-bindings/reset/sun9i-a80-ccu.h new file mode 100644 index 000000000000..4b8df4b36788 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-ccu.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ + +#define RST_BUS_FD 0 +#define RST_BUS_VE 1 +#define RST_BUS_GPU_CTRL 2 +#define RST_BUS_SS 3 +#define RST_BUS_MMC 4 +#define RST_BUS_NAND0 5 +#define RST_BUS_NAND1 6 +#define RST_BUS_SDRAM 7 +#define RST_BUS_SATA 8 +#define RST_BUS_TS 9 +#define RST_BUS_SPI0 10 +#define RST_BUS_SPI1 11 +#define RST_BUS_SPI2 12 +#define RST_BUS_SPI3 13 + +#define RST_BUS_OTG 14 +#define RST_BUS_OTG_PHY 15 +#define RST_BUS_MIPI_HSI 16 +#define RST_BUS_GMAC 17 +#define RST_BUS_MSGBOX 18 +#define RST_BUS_SPINLOCK 19 +#define RST_BUS_HSTIMER 20 +#define RST_BUS_DMA 21 + +#define RST_BUS_LCD0 22 +#define RST_BUS_LCD1 23 +#define RST_BUS_EDP 24 +#define RST_BUS_LVDS 25 +#define RST_BUS_CSI 26 +#define RST_BUS_HDMI0 27 +#define RST_BUS_HDMI1 28 +#define RST_BUS_DE 29 +#define RST_BUS_MP 30 +#define RST_BUS_GPU 31 +#define RST_BUS_MIPI_DSI 32 + +#define RST_BUS_SPDIF 33 +#define RST_BUS_AC97 34 +#define RST_BUS_I2S0 35 +#define RST_BUS_I2S1 36 +#define RST_BUS_LRADC 37 +#define RST_BUS_GPADC 38 +#define RST_BUS_CIR_TX 39 + +#define RST_BUS_I2C0 40 +#define RST_BUS_I2C1 41 +#define RST_BUS_I2C2 42 +#define RST_BUS_I2C3 43 +#define RST_BUS_I2C4 44 +#define RST_BUS_UART0 45 +#define RST_BUS_UART1 46 +#define RST_BUS_UART2 47 +#define RST_BUS_UART3 48 +#define RST_BUS_UART4 49 +#define RST_BUS_UART5 50 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-de.h b/include/dt-bindings/reset/sun9i-a80-de.h new file mode 100644 index 000000000000..205072770171 --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-de.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ + +#define RST_FE0 0 +#define RST_FE1 1 +#define RST_FE2 2 +#define RST_DEU0 3 +#define RST_DEU1 4 +#define RST_BE0 5 +#define RST_BE1 6 +#define RST_BE2 7 +#define RST_DRC0 8 +#define RST_DRC1 9 +#define RST_MERGE 10 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_DE_H_ */ diff --git a/include/dt-bindings/reset/sun9i-a80-usb.h b/include/dt-bindings/reset/sun9i-a80-usb.h new file mode 100644 index 000000000000..ee492864c2aa --- /dev/null +++ b/include/dt-bindings/reset/sun9i-a80-usb.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ +#define _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ + +#define RST_USB0_HCI 0 +#define RST_USB1_HCI 1 +#define RST_USB2_HCI 2 + +#define RST_USB0_PHY 3 +#define RST_USB1_HSIC 4 +#define RST_USB1_PHY 5 +#define RST_USB2_HSIC 6 +#define RST_USB2_PHY 7 + +#endif /* _DT_BINDINGS_RESET_SUN9I_A80_USB_H_ */ diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h new file mode 100644 index 000000000000..a0b4019c8e01 --- /dev/null +++ b/include/dt-bindings/soc/zte,pm_domains.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2017 Linaro Ltd. 
+ * + * Author: Baoyou Xie <baoyou.xie@linaro.org> + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H +#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H + +#define DM_ZX296718_SAPPU 0 +#define DM_ZX296718_VDE 1 /* g1v6 */ +#define DM_ZX296718_VCE 2 /* h1v6 */ +#define DM_ZX296718_HDE 3 /* g2v2 */ +#define DM_ZX296718_VIU 4 +#define DM_ZX296718_USB20 5 +#define DM_ZX296718_USB21 6 +#define DM_ZX296718_USB30 7 +#define DM_ZX296718_HSIC 8 +#define DM_ZX296718_GMAC 9 +#define DM_ZX296718_TS 10 +#define DM_ZX296718_VOU 11 + +#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index c56fef40f53e..e098cbe27db5 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -48,9 +48,14 @@ extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, char __user *buffer, size_t buflen); -static inline const struct user_key_payload *user_key_payload(const struct key *key) +static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { - return (struct user_key_payload *)rcu_dereference_key(key); + return (struct user_key_payload *)dereference_key_rcu(key); +} + +static inline struct user_key_payload *user_key_payload_locked(const struct key *key) +{ + return (struct user_key_payload *)dereference_key_locked((struct key *)key); } #endif /* CONFIG_KEYS */ diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h index 7c0f6549898b..fdb545101ede 100644 --- a/include/linux/atmel-ssc.h +++ b/include/linux/atmel-ssc.h @@ -20,6 +20,7 @@ struct ssc_device { int user; int irq; bool clk_from_rk_pin; + bool sound_dai; }; struct ssc_device * __must_check ssc_request(unsigned int ssc_num); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 1303b570b18c..05488da3aee9 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -6,6 +6,8 @@ #include <asm/exec.h> #include <uapi/linux/binfmts.h> +struct filename; + #define CORENAME_MAX_SIZE 128 /* @@ -123,4 +125,12 @@ extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); +extern int do_execve(struct filename *, + const char __user * const __user *, + const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); + #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 7cf8a6c70a3f..8e521194f6fc 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) -static inline unsigned bio_segments(struct bio *bio) +static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec) { unsigned segs = 0; struct bio_vec bv; @@ -205,12 +205,17 @@ static inline unsigned bio_segments(struct bio *bio) break; } - bio_for_each_segment(bv, bio, iter) + __bio_for_each_segment(bv, bio, iter, *bvec) segs++; return segs; } +static inline unsigned bio_segments(struct bio *bio) +{ + return __bio_segments(bio, &bio->bi_iter); +} + /* * get a reference to a bio, so it won't disappear. 
the intended use is * something like: @@ -384,6 +389,8 @@ extern void bio_put(struct bio *); extern void __bio_clone_fast(struct bio *, struct bio *); extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); +extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t, + struct bio_set *, int, int); extern struct bio_set *fs_bio_set; diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h new file mode 100644 index 000000000000..b1ef6e14744f --- /dev/null +++ b/include/linux/blk-mq-virtio.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_BLK_MQ_VIRTIO_H +#define _LINUX_BLK_MQ_VIRTIO_H + +struct blk_mq_tag_set; +struct virtio_device; + +int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, + struct virtio_device *vdev, int first_vec); + +#endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8e4df3d6c8cd..b296a9006117 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,6 +33,7 @@ struct blk_mq_hw_ctx { struct blk_mq_ctx **ctxs; unsigned int nr_ctx; + wait_queue_t dispatch_wait; atomic_t wait_index; struct blk_mq_tags *tags; @@ -160,6 +161,7 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, + BLK_MQ_S_TAG_WAITING = 3, BLK_MQ_MAX_DEPTH = 10240, @@ -243,6 +245,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_mq_freeze_queue_start(struct request_queue *q); +void blk_mq_freeze_queue_wait(struct request_queue *q); +int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, + unsigned long timeout); int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); int blk_mq_map_queues(struct blk_mq_tag_set *set); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aecca0e7d9ca..796016e63c1d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -2,6 +2,7 @@ #define _LINUX_BLKDEV_H #include <linux/sched.h> +#include <linux/sched/clock.h> #ifdef CONFIG_BLOCK diff --git a/include/linux/bug.h b/include/linux/bug.h index baff2e8fc8a8..5828489309bb 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, /* * Since detected data corruption should stop operation on the affected - * structures, this returns false if the corruption condition is found. + * structures, the return value must be checked and sanely acted on by the caller. */ +static inline __must_check bool check_data_corruption(bool v) { return v; } #define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ - do { \ - if (unlikely(condition)) { \ + check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ } else \ WARN(1, fmt, ##__VA_ARGS__); \ - return false; \ } \ - } while (0) + corruption; \ + })) #endif /* _LINUX_BUG_H */
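The bug.h hunk above turns CHECK_DATA_CORRUPTION() from a statement that returned false out of the enclosing function into a __must_check boolean expression, so the early exit is now spelled out at each call site. A minimal sketch of the resulting caller pattern (my_entry_valid is a hypothetical validator, not a kernel function):

#include <linux/bug.h>
#include <linux/types.h>

static bool my_entry_valid(struct list_head *entry)
{
	/* evaluates to true when corruption was detected; by then the
	 * macro has already BUG()ed or WARN()ed, depending on
	 * CONFIG_BUG_ON_DATA_CORRUPTION */
	if (CHECK_DATA_CORRUPTION(entry->next->prev != entry,
				  "corrupted list: %p->next->prev != %p\n",
				  entry->next, entry))
		return false;	/* caller decides how to stop */
	return true;
}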
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 03a6653d329a..2ea0c282f3dc 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -22,7 +22,6 @@ struct ceph_osd_client; * completion callback for async writepages */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); -typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); #define CEPH_HOMELESS_OSD -1 @@ -170,15 +169,12 @@ struct ceph_osd_request { unsigned int r_num_ops; int r_result; - bool r_got_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; bool r_mempool; - struct completion r_completion; - struct completion r_done_completion; /* fsync waiter */ + struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - ceph_osdc_unsafe_callback_t r_unsafe_callback; struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 9a9041784dcf..938656f70807 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) case CEPH_POOL_TYPE_EC: return false; default: - BUG_ON(1); + BUG(); } } @@ -82,13 +82,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest, void ceph_oloc_destroy(struct ceph_object_locator *oloc); /* - * Maximum supported by kernel client object name length - * - * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100) - */ -#define CEPH_MAX_OID_NAME_LEN 100 - -/* * 51-char inline_name is long enough for all cephfs and all but one * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all @@ -173,8 +166,8 @@ struct ceph_osdmap { * the list of osds that store+replicate them. */ struct crush_map *crush; - struct mutex crush_scratch_mutex; - int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3]; + struct mutex crush_workspace_mutex; + void *crush_workspace; }; static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 5c0da61cb763..5d0018782d50 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -50,7 +50,7 @@ struct ceph_timespec { #define CEPH_PG_LAYOUT_LINEAR 2 #define CEPH_PG_LAYOUT_HYBRID 3 -#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ +#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */ /* * placement group. diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 861b4677fc5b..6a3f850cabab 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -148,14 +148,18 @@ struct cgroup_subsys_state { * set for a task. */ struct css_set { - /* Reference count */ - atomic_t refcount; - /* - * List running through all cgroup groups in the same hash - * slot. Protected by css_set_lock + * Set of subsystem states, one for each subsystem. This array is + * immutable after creation apart from the init_css_set during + * subsystem registration (at boot time).
*/ - struct hlist_node hlist; + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + /* reference count */ + atomic_t refcount; + + /* the default cgroup associated with this css_set */ + struct cgroup *dfl_cgrp; /* * Lists running through all tasks using this cgroup group. @@ -167,21 +171,29 @@ struct css_set { struct list_head tasks; struct list_head mg_tasks; + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* - * List of cgrp_cset_links pointing at cgroups referenced from this - * css_set. Protected by css_set_lock. + * On the default hierarchy, ->subsys[ssid] may point to a css + * attached to an ancestor instead of the cgroup this css_set is + * associated with. The following node is anchored at + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to + * iterate through all css's attached to a given cgroup. */ - struct list_head cgrp_links; + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - /* the default cgroup associated with this css_set */ - struct cgroup *dfl_cgrp; + /* + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock + */ + struct hlist_node hlist; /* - * Set of subsystem states, one for each subsystem. This array is - * immutable after creation apart from the init_css_set during - * subsystem registration (at boot time). + * List of cgrp_cset_links pointing at cgroups referenced from this + * css_set. Protected by css_set_lock. */ - struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + struct list_head cgrp_links; /* * List of csets participating in the on-going migration either as @@ -201,18 +213,6 @@ struct css_set { struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; - /* - * On the default hierarhcy, ->subsys[ssid] may point to a css - * attached to an ancestor instead of the cgroup this css_set is - * associated with. The following node is anchored at - * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to - * iterate through all css's attached to a given cgroup. - */ - struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; - - /* all css_task_iters currently walking this cset */ - struct list_head task_iters; - /* dead and being drained, ignore for migration */ bool dead; @@ -388,6 +388,9 @@ struct cftype { struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() @@ -528,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_begin() and allows cgroup operations to - * synchronize against threadgroup changes using a percpu_rw_semaphore. + * Allows cgroup operations to synchronize against threadgroup changes + * using a percpu_rw_semaphore. */ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { @@ -540,8 +543,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * - * Called from threadgroup_change_end(). Counterpart of - * cgroup_threadcgroup_change_begin(). + * Counterpart of cgroup_threadgroup_change_begin(). */ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { @@ -552,7 +554,11 @@ #define CGROUP_SUBSYS_COUNT 0 -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */
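The comment updates above reflect that cgroup_threadgroup_change_begin()/_end() are now called directly rather than through threadgroup_change_begin() wrappers, and the !CONFIG_CGROUPS stub gains a might_sleep() so calling from atomic context still trips a warning on cgroup-less builds. A minimal usage sketch (my_update_threadgroup is hypothetical; the real callers sit in the fork and exec paths):

#include <linux/cgroup-defs.h>
#include <linux/sched.h>

static void my_update_threadgroup(struct task_struct *tsk)
{
	cgroup_threadgroup_change_begin(tsk);	/* may sleep */
	/* ... mutate tsk's threadgroup, e.g. link in a new thread,
	 * while cgroup migration of the group is excluded ... */
	cgroup_threadgroup_change_end(tsk);
}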
*/ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { @@ -552,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #define CGROUP_SUBSYS_COUNT 0 -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} +static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) +{ + might_sleep(); +} + static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c83c23f0577b..f6b43fbb141c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset * @leader: the loop cursor * @dst_css: the destination css - * @tset: takset to iterate + * @tset: taskset to iterate * * Iterate threadgroup leaders of @tset. For single-task migrations, @tset * may not contain any. diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h new file mode 100644 index 000000000000..e94290b29e99 --- /dev/null +++ b/include/linux/cgroup_rdma.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of the GNU + * General Public License. See the file COPYING in the main directory of the + * Linux distribution for more details. + */ + +#ifndef _CGROUP_RDMA_H +#define _CGROUP_RDMA_H + +#include <linux/cgroup.h> + +enum rdmacg_resource_type { + RDMACG_RESOURCE_HCA_HANDLE, + RDMACG_RESOURCE_HCA_OBJECT, + RDMACG_RESOURCE_MAX, +}; + +#ifdef CONFIG_CGROUP_RDMA + +struct rdma_cgroup { + struct cgroup_subsys_state css; + + /* + * head to keep track of all resource pools + * that belong to this cgroup. + */ + struct list_head rpools; +}; + +struct rdmacg_device { + struct list_head dev_node; + struct list_head rpools; + char *name; +}; + +/* + * APIs for RDMA/IB stack to publish when a device wants to + * participate in resource accounting + */ +int rdmacg_register_device(struct rdmacg_device *device); +void rdmacg_unregister_device(struct rdmacg_device *device); + +/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ +int rdmacg_try_charge(struct rdma_cgroup **rdmacg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +void rdmacg_uncharge(struct rdma_cgroup *cg, + struct rdmacg_device *device, + enum rdmacg_resource_type index); +#endif /* CONFIG_CGROUP_RDMA */ +#endif /* _CGROUP_RDMA_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 0df0336acee9..d0e597c44585 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -56,6 +56,10 @@ SUBSYS(hugetlb) SUBSYS(pids) #endif +#if IS_ENABLED(CONFIG_CGROUP_RDMA) +SUBSYS(rdma) +#endif + /* * The following subsystems are not supported on the default hierarchy.
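The new cgroup_rdma.h interface above is meant to be driven from the RDMA/IB stack; a rough sketch of a driver registering a device and charging one resource, assuming CONFIG_CGROUP_RDMA is enabled (device and function names are hypothetical):

    static struct rdmacg_device my_rdmacg_dev = { .name = "mydev0" };

    static int my_driver_probe(void)
    {
            /* publish the device so the rdma controller can account it */
            return rdmacg_register_device(&my_rdmacg_dev);
    }

    static int my_create_hca_object(struct rdma_cgroup **cg)
    {
            int ret;

            /* charge one HCA object against the caller's rdma cgroup */
            ret = rdmacg_try_charge(cg, &my_rdmacg_dev,
                                    RDMACG_RESOURCE_HCA_OBJECT);
            if (ret)
                    return ret;
            /*
             * ... create the object; on failure or teardown:
             * rdmacg_uncharge(*cg, &my_rdmacg_dev,
             *                 RDMACG_RESOURCE_HCA_OBJECT);
             */
            return 0;
    }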
*/ diff --git a/include/linux/cma.h b/include/linux/cma.h index 6f0a91b37f68..03f32d0bd1d8 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, + gfp_t gfp_mask); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/compat.h b/include/linux/compat.h index 9e40be522793..aef47be2a5c1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + if (t->sas_ss_flags & SS_AUTODISARM) \ + sas_ss_reset(t); \ } while (0); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 0444b1336268..0efef9cf014f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -116,11 +116,13 @@ */ #define __pure __attribute__((pure)) #define __aligned(x) __attribute__((aligned(x))) +#define __aligned_largest __attribute__((aligned)) #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) #define __attribute_const__ __attribute__((__const__)) #define __maybe_unused __attribute__((unused)) #define __always_unused __attribute__((unused)) +#define __mode(x) __attribute__((mode(x))) /* gcc version specific checks */ @@ -195,6 +197,17 @@ #endif #endif +#ifdef CONFIG_STACK_VALIDATION +#define annotate_unreachable() ({ \ + asm("%c0:\t\n" \ + ".pushsection .discard.unreachable\t\n" \ + ".long %c0b - .\t\n" \ + ".popsection\t\n" : : "i" (__LINE__)); \ +}) +#else +#define annotate_unreachable() +#endif + /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer @@ -204,7 +217,8 @@ * this in the preprocessor, but we can live with this because they're * unreleased. Really, we need to have autoconf for the kernel. */ -#define unreachable() __builtin_unreachable() +#define unreachable() \ + do { annotate_unreachable(); __builtin_unreachable(); } while (0) /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 91c30cba984e..f8110051188f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -105,29 +105,36 @@ struct ftrace_branch_data { }; }; +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; + /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. 
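Tying the cma.h change above to its users: cma_alloc() now takes the GFP mask from the caller instead of assuming one (the dma_alloc_from_contiguous() and alloc_contig_range() hunks later in this series thread the same parameter through). A minimal sketch of the updated call, with the CMA area and sizes hypothetical:

    static struct page *grab_cma_pages(struct cma *area)
    {
            /* 16 pages, no extra alignment, caller-chosen GFP flags */
            struct page *page = cma_alloc(area, 16, 0,
                                          GFP_KERNEL | __GFP_NOWARN);

            if (!page)
                    return NULL;
            /* ... use the range, then: cma_release(area, page, 16); */
            return page;
    }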
*/ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect) ({ \ +#define __branch_check__(x, expect, is_constant) ({ \ int ______r; \ - static struct ftrace_branch_data \ + static struct ftrace_likely_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ }; \ - ______r = likely_notrace(x); \ - ftrace_likely_update(&______f, ______r, expect); \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ ______r; \ }) @@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) # endif # ifndef unlikely -# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -570,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s (_________p1); \ }) -/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ -#ifdef CONFIG_KPROBES -# define __kprobes __attribute__((__section__(".kprobes.text"))) -# define nokprobe_inline __always_inline -#else -# define __kprobes -# define nokprobe_inline inline -#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21f9c74496e7..f92081234afd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -30,6 +30,8 @@ struct cpu { extern void boot_cpu_init(void); extern void boot_cpu_state_init(void); +extern void cpu_init(void); +extern void trap_init(void); extern int register_cpu(struct cpu *cpu, int num); extern struct device *get_cpu_device(unsigned cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index bb790c4db0c5..62d240e962f0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -26,7 +26,6 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, - CPUHP_VIRT_SCSI_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index bfc204e70338..611fce58d670 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -9,6 +9,8 @@ */ #include <linux/sched.h> +#include <linux/sched/topology.h> +#include <linux/sched/task.h> #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> diff --git a/include/linux/cputime.h b/include/linux/cputime.h deleted file mode 100644 index a691dc4ddc13..000000000000 --- a/include/linux/cputime.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __LINUX_CPUTIME_H -#define __LINUX_CPUTIME_H - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include <asm/cputime.h> - -#ifndef cputime_to_nsecs -# define cputime_to_nsecs(__ct) \ - (cputime_to_usecs(__ct) * 
NSEC_PER_USEC) -#endif - -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -#endif /* __LINUX_CPUTIME_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index f0e70a1bb3ac..b03e7d049a64 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -18,8 +18,9 @@ #include <linux/selinux.h> #include <linux/atomic.h> #include <linux/uidgid.h> +#include <linux/sched.h> +#include <linux/sched/user.h> -struct user_struct; struct cred; struct inode; diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index be8f12b8f195..fbecbd089d75 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -135,13 +135,6 @@ struct crush_bucket { __u32 size; /* num items */ __s32 *items; - /* - * cached random permutation: used for uniform bucket and for - * the linear search fallback for the other bucket types. - */ - __u32 perm_x; /* @x for which *perm is defined */ - __u32 perm_n; /* num elements of *perm that are permuted/defined */ - __u32 *perm; }; struct crush_bucket_uniform { @@ -211,6 +204,21 @@ struct crush_map { * device fails. */ __u8 chooseleaf_stable; + /* + * This value is calculated after decode or construction by + * the builder. It is exposed here (rather than having a + * 'build CRUSH working space' function) so that callers can + * reserve a static buffer, allocate space on the stack, or + * otherwise avoid calling into the heap allocator if they + * want to. The size of the working space depends on the map, + * while the size of the scratch vector passed to the mapper + * depends on the size of the desired result set. + * + * Nothing stops the caller from allocating both in one swell + * foop and passing in two pointers, though. + */ + size_t working_size; + #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. version 1 @@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i) return ((i+1) << 1)-1; } +/* + * These data structures are private to the CRUSH implementation. They + * are exposed in this header file because the builder needs their + * definitions to calculate the total working size. + * + * Moving this out of the crush map allows us to treat the CRUSH map as + * immutable within the mapper and removes the requirement for a CRUSH + * map lock. + */ +struct crush_work_bucket { + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; /* Permutation of the bucket's items */ +}; + +struct crush_work { + struct crush_work_bucket **work; /* Per-bucket working store */ +}; + #endif diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index 5dfd5b1125d2..c95e19e1ff11 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map, int ruleno, int x, int *result, int result_max, const __u32 *weights, int weight_max, - int *scratch); + void *cwin); + +/* + * Returns the exact amount of workspace that will need to be used + * for a given combination of crush_map and result_max. The caller can + * then allocate this much on its own, either on the stack, in a + * per-thread long-lived buffer, or however it likes.
+ */ +static inline size_t crush_work_size(const struct crush_map *map, + int result_max) +{ + return map->working_size + result_max * 3 * sizeof(__u32); +} + +void crush_init_workspace(const struct crush_map *map, void *v); #endif diff --git a/include/linux/dax.h b/include/linux/dax.h index 2983e52efd07..d8a3dc042e1c 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -38,8 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); -int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - const struct iomap_ops *ops); +int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, + const struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, @@ -71,21 +71,13 @@ static inline unsigned int dax_radix_order(void *entry) return PMD_SHIFT - PAGE_SHIFT; return 0; } -int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmd, unsigned int flags, const struct iomap_ops *ops); #else static inline unsigned int dax_radix_order(void *entry) { return 0; } -static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, unsigned int flags, - const struct iomap_ops *ops) -{ - return VM_FAULT_FALLBACK; -} #endif -int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); +int dax_pfn_mkwrite(struct vm_fault *vmf); static inline bool vma_is_dax(struct vm_area_struct *vma) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c965e4469499..d2e38dc6172c 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> #include <linux/stringhash.h> +#include <linux/wait.h> struct path; struct vfsmount; @@ -562,7 +563,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * @inode: inode to select the dentry from multiple layers (can be NULL) * @flags: open flags to control copy-up behavior * - * If dentry is on an union/overlay, then return the underlying, real dentry. + * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. * * See also: Documentation/filesystems/vfs.txt @@ -581,7 +582,7 @@ static inline struct dentry *d_real(struct dentry *dentry, * d_real_inode - Return the real inode * @dentry: The dentry to query * - * If dentry is on an union/overlay, then return the underlying, real inode. + * If dentry is on a union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). 
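Putting the crush.h and mapper.h hunks together: a mapper caller now sizes and initializes its own workspace instead of relying on the scratch array that used to live in the map. A sketch, assuming a GFP_NOIO heap allocation is acceptable at the call site (a static or stack buffer works equally well):

    static int map_pg_to_osds(const struct crush_map *map, int ruleno, int x,
                              int *result, int result_max,
                              const __u32 *weights, int weight_max)
    {
            size_t wsize = crush_work_size(map, result_max);
            void *work = kmalloc(wsize, GFP_NOIO);
            int n;

            if (!work)
                    return -ENOMEM;
            crush_init_workspace(map, work);
            n = crush_do_rule(map, ruleno, x, result, result_max,
                              weights, weight_max, work);
            kfree(work);
            return n;       /* number of osds written to result[] */
    }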
*/ static inline struct inode *d_real_inode(const struct dentry *dentry) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 9d571acd3a48..7dff776e6d16 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -98,9 +98,10 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - struct vfsmount *(*f)(void *), + debugfs_automount_t f, void *data); void debugfs_remove(struct dentry *dentry); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 00e60f79a9cc..4178d2493547 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -18,8 +18,6 @@ #define _LINUX_DELAYACCT_H #include <uapi/linux/taskstats.h> -#include <linux/sched.h> -#include <linux/slab.h> /* * Per-task flags relevant to delay accounting @@ -30,7 +28,43 @@ #define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */ #ifdef CONFIG_TASK_DELAY_ACCT +struct task_delay_info { + spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately + * + * struct timespec XXX_start, XXX_end; + * u64 XXX_delay; + * u32 XXX_count; + * + * Atomicity of updates to XXX_delay, XXX_count protected by + * single lock above (split into XXX_lock if contention is an issue). + */ + + /* + * XXX_count is incremented on every XXX operation, the delay + * associated with the operation is added to XXX_delay. + * XXX_delay contains the accumulated delay time in nanoseconds. + */ + u64 blkio_start; /* Shared by blkio, swapin */ + u64 blkio_delay; /* wait for sync block io completion */ + u64 swapin_delay; /* wait for swapin block io completion */ + u32 blkio_count; /* total count of the number of sync block */ + /* io operations performed */ + u32 swapin_count; /* total count of the number of swapin block */ + /* io operations performed */ + + u64 freepages_start; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ +}; +#endif +#include <linux/sched.h> +#include <linux/slab.h> + +#ifdef CONFIG_TASK_DELAY_ACCT extern int delayacct_on; /* Delay accounting turned on/off */ extern struct kmem_cache *delayacct_cache; extern void delayacct_init(void); diff --git a/include/linux/device.h b/include/linux/device.h index bd684fc8ec1d..30c4570e928d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -925,6 +925,7 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif + const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as @@ -1139,6 +1140,7 @@ static inline bool device_supports_offline(struct device *dev) extern void lock_device_hotplug(void); extern void unlock_device_hotplug(void); extern int lock_device_hotplug_sysfs(void); +void assert_held_device_hotplug(void); extern int device_offline(struct device *dev); extern int device_online(struct device *dev); extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8daeb3ce0016..bfb3704fc6fc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,23 +39,6 @@ struct dma_buf_attachment; 
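Among the changes above, debugfs_create_automount() now takes the named debugfs_automount_t callback type, which also receives the triggering dentry. A short sketch with hypothetical names:

    struct my_ctx { struct vfsmount *mnt; };    /* hypothetical */

    /* matches the new debugfs_automount_t signature */
    static struct vfsmount *my_automount(struct dentry *dentry, void *data)
    {
            struct my_ctx *ctx = data;

            return ctx->mnt;        /* mount to attach at this dentry */
    }

    /* at init time: */
    debugfs_create_automount("my_dir", NULL, my_automount, ctx);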
/** * struct dma_buf_ops - operations possible on struct dma_buf - * @attach: [optional] allows different devices to 'attach' themselves to the - * given buffer. It might return -EBUSY to signal that backing storage - * is already allocated and incompatible with the requirements - * of requesting device. - * @detach: [optional] detach a given device from this buffer. - * @map_dma_buf: returns list of scatter pages allocated, increases usecount - * of the buffer. Requires atleast one attach to be called - * before. Returned sg list should already be mapped into - * _device_ address space. This call may sleep. May also return - * -EINTR. Should return -EINVAL if attach hasn't been called yet. - * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter - * pages. - * @release: release this buffer; to be called after the last dma_buf_put. - * @begin_cpu_access: [optional] called before cpu access to invalidate cpu - * caches and allocate backing storage (if not yet done) - * respectively pin the object into memory. - * @end_cpu_access: [optional] called after cpu access to flush caches. * @kmap_atomic: maps a page from the buffer into kernel address * space, users may not block until the subsequent unmap call. * This callback must not sleep. @@ -63,43 +46,206 @@ struct dma_buf_attachment; * This Callback must not sleep. * @kmap: maps a page from the buffer into kernel address space. * @kunmap: [optional] unmaps a page from the buffer. - * @mmap: used to expose the backing storage to userspace. Note that the - * mapping needs to be coherent - if the exporter doesn't directly - * support this, it needs to fake coherency by shooting down any ptes - * when transitioning away from the cpu domain. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { + /** + * @attach: + * + * This is called from dma_buf_attach() to make sure that a given + * &device can access the provided &dma_buf. Exporters which support + * buffer objects in special locations like VRAM or device-specific + * carveout areas should check whether the buffer could be moved to + * system memory (or directly accessed by the provided device), and + * otherwise need to fail the attach operation. + * + * The exporter should also in general check whether the current + * allocation fulfills the DMA constraints of the new device. If this + * is not the case, and the allocation cannot be moved, it should also + * fail the attach operation. + * + * Any exporter-private housekeeping data can be stored in the + * &dma_buf_attachment.priv pointer. + * + * This callback is optional. + * + * Returns: + * + * 0 on success, negative error code on failure. It might return -EBUSY + * to signal that backing storage is already allocated and incompatible + * with the requirements of requesting device. + */ int (*attach)(struct dma_buf *, struct device *, - struct dma_buf_attachment *); + struct dma_buf_attachment *); + /** + * @detach: + * + * This is called by dma_buf_detach() to release a &dma_buf_attachment. + * Provided so that exporters can clean up any housekeeping for an + * &dma_buf_attachment. + * + * This callback is optional. + */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); - /* For {map,unmap}_dma_buf below, any specific buffer attributes - * required should get added to device_dma_parameters accessible - * via dev->dma_params.
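For reference against the @attach/@detach contract documented above (and the @map_dma_buf contract that follows), these hooks are driven from the importer side through the dma-buf core. A minimal importer sketch using the standard dma_buf_attach()/dma_buf_map_attachment() entry points, with error handling trimmed:

    static int my_import(struct dma_buf *dmabuf, struct device *dev)
    {
            struct dma_buf_attachment *att;
            struct sg_table *sgt;

            att = dma_buf_attach(dmabuf, dev);      /* calls ->attach */
            if (IS_ERR(att))
                    return PTR_ERR(att);

            sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
            if (IS_ERR(sgt)) {
                    dma_buf_detach(dmabuf, att);    /* calls ->detach */
                    return PTR_ERR(sgt);
            }

            /* ... program the device with sgt ... */

            dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
            dma_buf_detach(dmabuf, att);
            return 0;
    }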
+ /** + * @map_dma_buf: + * + * This is called by dma_buf_map_attachment() and is used to map a + * shared &dma_buf into device address space, and it is mandatory. It + * can only be called if @attach has been called successfully. This + * essentially pins the DMA buffer into place, and it cannot be moved + * any more. + * + * This call may sleep, e.g. when the backing storage first needs to be + * allocated, or moved to a location suitable for all currently attached + * devices. + * + * Note that any specific buffer attributes required for this function + * should get added to device_dma_parameters accessible via + * &device.dma_params from the &dma_buf_attachment. The @attach callback + * should also check these constraints. + * + * If this is being called for the first time, the exporter can now + * choose to scan through the list of attachments for this buffer, + * collate the requirements of the attached devices, and choose an + * appropriate backing storage for the buffer. + * + * Based on enum dma_data_direction, it might be possible to have + * multiple users accessing at the same time (for reading, maybe), or + * any other kind of sharing that the exporter might wish to make + * available to buffer-users. + * + * Returns: + * + * A &sg_table scatter list of the backing storage of the DMA buffer, + * already mapped into the device address space of the &device attached + * with the provided &dma_buf_attachment. + * + * On failure, returns a negative error value wrapped into a pointer. + * May also return -EINTR when a signal was received while being + * blocked. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); + enum dma_data_direction); + /** + * @unmap_dma_buf: + * + * This is called by dma_buf_unmap_attachment() and should unmap and + * release the &sg_table allocated in @map_dma_buf, and it is mandatory. + * It should also unpin the backing storage if this is the last mapping + * of the DMA buffer, if the exporter supports backing storage + * migration. + */ void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); + struct sg_table *, + enum dma_data_direction); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ - /* after final dma_buf_put() */ + /** + * @release: + * + * Called after the last dma_buf_put to release the &dma_buf, and + * mandatory. + */ void (*release)(struct dma_buf *); + /** + * @begin_cpu_access: + * + * This is called from dma_buf_begin_cpu_access() and allows the + * exporter to ensure that the memory is actually available for cpu + * access - the exporter might need to allocate or swap-in and pin the + * backing storage. The exporter also needs to ensure that cpu access is + * coherent for the access direction. The direction can be used by the + * exporter to optimize the cache flushing, i.e. access with a different + * direction (read instead of write) might return stale or even bogus + * data (e.g. when the exporter needs to copy the data to temporary + * storage). + * + * This callback is optional. + * + * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command + * from userspace (where storage shouldn't be pinned to avoid handing + * de-facto mlock rights to userspace) and for the kernel-internal + * users of the various kmap interfaces, where the backing storage must + * be pinned to guarantee that the atomic kmap calls can succeed.
Since + * there's no in-kernel users of the kmap interfaces yet this isn't a + * real problem. + * + * Returns: + * + * 0 on success or a negative error code on failure. This can for + * example fail when the backing storage can't be allocated. Can also + * return -ERESTARTSYS or -EINTR when the call has been interrupted and + * needs to be restarted. + */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + + /** + * @end_cpu_access: + * + * This is called from dma_buf_end_cpu_access() when the importer is + * done accessing the CPU. The exporter can use this to flush caches and + * unpin any resources pinned in @begin_cpu_access. + * The result of any dma_buf kmap calls after end_cpu_access is + * undefined. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. Can return + * -ERESTARTSYS or -EINTR when the call has been interrupted and needs + * to be restarted. + */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); void (*kunmap)(struct dma_buf *, unsigned long, void *); + /** + * @mmap: + * + * This callback is used by the dma_buf_mmap() function + * + * Note that the mapping needs to be incoherent, userspace is expected + * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * + * Because dma-buf buffers have invariant size over their lifetime, the + * dma-buf core checks whether a vma is too large and rejects such + * mappings. The exporter hence does not need to duplicate this check. + * Drivers do not need to check this themselves. + * + * If an exporter needs to manually flush caches and hence needs to fake + * coherency for mmap support, it needs to be able to zap all the ptes + * pointing at the backing storage. Now linux mm needs a struct + * address_space associated with the struct file stored in vma->vm_file + * to do that with the function unmap_mapping_range. But the dma_buf + * framework only backs every dma_buf fd with the anon_file struct file, + * i.e. all dma_bufs share the same file. + * + * Hence exporters need to setup their own file (and address_space) + * association by setting vma->vm_file and adjusting vma->vm_pgoff in + * the dma_buf mmap callback. In the specific case of a gem driver the + * exporter could use the shmem file already provided by gem (and set + * vm_pgoff = 0). Exporters can then zap ptes by unmapping the + * corresponding range of the struct address_space associated with their + * own file. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. + */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); void *(*vmap)(struct dma_buf *); @@ -124,6 +270,15 @@ struct dma_buf_ops { * @poll: for userspace poll support * @cb_excl: for userspace poll support * @cb_shared: for userspace poll support + * + * This represents a shared buffer, created by calling dma_buf_export(). The + * userspace representation is a normal file descriptor, which can be created by + * calling dma_buf_fd(). + * + * Shared dma buffers are reference counted using dma_buf_put() and + * get_dma_buf(). + * + * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { size_t size; @@ -160,6 +315,11 @@ struct dma_buf { * This structure holds the attachment information between the dma_buf buffer * and its user device(s). 
The list contains one attachment struct per device * attached to the buffer. + * + * An attachment is created by calling dma_buf_attach(), and released again by + * calling dma_buf_detach(). The DMA mapping itself needed to initiate a + * transfer is created by dma_buf_map_attachment() and freed again by calling + * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; @@ -192,9 +352,11 @@ struct dma_buf_export_info { }; /** - * helper macro for exporters; zeros and fills in most common values - * + * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name + * + * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info, + * zeroes it out and pre-populates exp_name in it. */ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index fec734df1524..b67bf6ac907d 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, } struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order); + unsigned int order, gfp_t gfp_mask); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, static inline struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int order) + unsigned int order, gfp_t gfp_mask) { return NULL; } diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index d51a7d23c358..6048fa404e57 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -47,7 +47,7 @@ struct dma_fence_cb; * can be compared to decide which fence would be signaled later. * @flags: A mask of DMA_FENCE_FLAG_* defined below * @timestamp: Timestamp when the fence was signaled. - * @status: Optional, only valid if < 0, must be set before calling + * @error: Optional, only valid if < 0, must be set before calling * dma_fence_signal, indicates that the fence has completed with an error. * * the flags member must be manipulated and read using the appropriate @@ -79,7 +79,7 @@ struct dma_fence { unsigned seqno; unsigned long flags; ktime_t timestamp; - int status; + int error; }; enum dma_fence_flag_bits { @@ -133,7 +133,7 @@ struct dma_fence_cb { * or some failure occurred that made it impossible to enable * signaling. True indicates successful enabling. * - * fence->status may be set in enable_signaling, but only when false is + * fence->error may be set in enable_signaling, but only when false is * returned. * * Calling dma_fence_signal before enable_signaling is called allows @@ -145,7 +145,7 @@ struct dma_fence_cb { * the second time will be a noop since it was already signaled. * * Notes on signaled: - * May set fence->status if returning true. + * May set fence->error if returning true. * * Notes on wait: * Must not be NULL, set to dma_fence_default_wait for default implementation. @@ -378,6 +378,50 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, return dma_fence_is_signaled(f2) ? 
NULL : f2; } +/** + * dma_fence_get_status_locked - returns the status upon completion + * @fence: [in] the dma_fence to query + * + * Drivers can supply an optional error status condition before they signal + * the fence (to indicate whether the fence was completed due to an error + * rather than success). The value of the status condition is only valid + * if the fence has been signaled; dma_fence_get_status_locked() first checks + * the signal state before reporting the error status. + * + * Returns 0 if the fence has not yet been signaled, 1 if the fence has + * been signaled without an error condition, or a negative error code + * if the fence has been completed in error. + */ +static inline int dma_fence_get_status_locked(struct dma_fence *fence) +{ + if (dma_fence_is_signaled_locked(fence)) + return fence->error ?: 1; + else + return 0; +} + +int dma_fence_get_status(struct dma_fence *fence); + +/** + * dma_fence_set_error - flag an error condition on the fence + * @fence: [in] the dma_fence + * @error: [in] the error to store + * + * Drivers can supply an optional error status condition before they signal + * the fence, to indicate that the fence was completed due to an error + * rather than success. This must be set before signaling (so that the value + * is visible before any waiters on the signal callback are woken). This + * helper exists to help catch erroneous setting of #dma_fence.error. + */ +static inline void dma_fence_set_error(struct dma_fence *fence, + int error) +{ + BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); + BUG_ON(error >= 0 || error < -MAX_ERRNO); + + fence->error = error; +} + signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c24721a33b4c..0977317c6835 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -134,7 +134,8 @@ struct dma_map_ops { int is_phys; }; -extern struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} #else /* * Define the dma api to allow compilation but not linking of * dma dependent code.
Code that depends on the dma-mapping * API needs to set 'depends on HAS_DMA' in its Kconfig */ -extern struct dma_map_ops bad_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +extern const struct dma_map_ops bad_dma_ops; +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return &bad_dma_ops; } @@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(ptr, size); @@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; struct scatterlist *s; @@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); @@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); @@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); @@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_resource) @@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) @@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) @@ -371,7 +384,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) 
{ - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) @@ -383,7 +396,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) @@ -428,7 +441,7 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, @@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!ops); @@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); WARN_ON(irqs_disabled()); @@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!ops) return 0; @@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->set_dma_mask) return ops->set_dma_mask(dev, mask); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e9bc9292bd3a..e8ffba1052d3 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -26,7 +26,7 @@ #include <linux/msi.h> #include <linux/irqreturn.h> #include <linux/rwsem.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> struct acpi_dmar_header; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 698d51a0eea3..c8240a12c42d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -3,6 +3,8 @@ #include <linux/user.h> #include <linux/bug.h> +#include <linux/sched/task_stack.h> + #include <asm/elf.h> #include <uapi/linux/elfcore.h> diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index cea41a124a80..e2d239ed4c60 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,6 +36,12 @@ #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) #define F2FS_META_INO(sbi) (sbi->meta_ino_num) +#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ 
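As a quick sanity check on the new f2fs I/O-size macros above (F2FS_IO_SIZE_BITS and F2FS_IO_SIZE_MASK follow just below), here is a worked example for a hypothetical superblock-info with write_io_size_bits == 3, i.e. 4 KB blocks clustered eight at a time:

    /*
     * sbi->write_io_size_bits == 3:
     *   F2FS_IO_SIZE(sbi)       == 1 << 3        == 8 blocks
     *   F2FS_IO_SIZE_KB(sbi)    == 1 << (3 + 2)  == 32 KB
     *   F2FS_IO_SIZE_BYTES(sbi) == 1 << (3 + 12) == 32768 bytes
     *   F2FS_IO_SIZE_MASK(sbi)  == 8 - 1         == 7
     */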
+#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) + /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) #define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) @@ -108,6 +114,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 #define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 @@ -272,6 +279,7 @@ struct f2fs_node { * For NAT entries */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 9f4956d8601c..728d4e0292aa 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -61,6 +61,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, #endif /* CONFIG_FAULT_INJECTION */ +struct kmem_cache; + #ifdef CONFIG_FAILSLAB extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else diff --git a/include/linux/frame.h b/include/linux/frame.h index e6baaba3f1ae..d772c61c31da 100644 --- a/include/linux/frame.h +++ b/include/linux/frame.h @@ -11,7 +11,7 @@ * For more information, see tools/objtool/Documentation/stack-validation.txt. */ #define STACK_FRAME_NON_STANDARD(func) \ - static void __used __section(__func_stack_frame_non_standard) \ + static void __used __section(.discard.func_stack_frame_non_standard) \ *__func_stack_frame_non_standard_##func = func #else /* !CONFIG_STACK_VALIDATION */ diff --git a/include/linux/fs.h b/include/linux/fs.h index c930cbc19342..aad3fd0ff5f8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -655,6 +655,11 @@ struct inode { void *i_private; /* fs or device private pointer */ }; +static inline unsigned int i_blocksize(const struct inode *node) +{ + return (1 << node->i_blkbits); +} + static inline int inode_unhashed(struct inode *inode) { return hlist_unhashed(&inode->i_hash); @@ -1562,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); extern int vfs_whiteout(struct inode *, struct dentry *); +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + /* * VFS file helper functions. 
*/ @@ -1701,7 +1709,7 @@ struct inode_operations { int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); @@ -1713,6 +1721,29 @@ struct inode_operations { int (*set_acl)(struct inode *, struct posix_acl *, int); } ____cacheline_aligned; +static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +static inline int call_fsync(struct file *file, loff_t start, loff_t end, + int datasync) +{ + return file->f_op->fsync(file, start, end, datasync); +} + ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, @@ -1739,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - u64 len) -{ - int ret; - - sb_start_write(file_inode(file_out)->i_sb); - ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); - sb_end_write(file_inode(file_out)->i_sb); - - return ret; -} - struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); @@ -2563,6 +2581,19 @@ static inline void file_end_write(struct file *file) __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + u64 len) +{ + int ret; + + file_start_write(file_out); + ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); + file_end_write(file_out); + + return ret; +} + /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. 
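The ->getattr prototype change at the start of this chunk (together with the vfs_getattr()/vfs_statx() rework that follows) hands filesystems the path plus a request mask and query flags instead of a vfsmount/dentry pair. A sketch of a filesystem adapting, assuming it only needs the generic attribute fill (names hypothetical):

    static int myfs_getattr(const struct path *path, struct kstat *stat,
                            u32 request_mask, unsigned int query_flags)
    {
            struct inode *inode = d_inode(path->dentry);

            /* fill the basic fields; extra statx bits can be added here */
            generic_fillattr(inode, stat);
            return 0;
    }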
@@ -2871,8 +2902,8 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); extern void generic_fillattr(struct inode *, struct kstat *); -int vfs_getattr_nosec(struct path *path, struct kstat *stat); -extern int vfs_getattr(struct path *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); void inode_add_bytes(struct inode *inode, loff_t bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes); @@ -2885,10 +2916,29 @@ extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); -extern int vfs_stat(const char __user *, struct kstat *); -extern int vfs_lstat(const char __user *, struct kstat *); -extern int vfs_fstat(unsigned int, struct kstat *); -extern int vfs_fstatat(int , const char __user *, struct kstat *, int); +extern int vfs_statx(int, const char __user *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline int vfs_stat(const char __user *filename, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS); +} +static inline int vfs_lstat(const char __user *name, struct kstat *stat) +{ + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW, + stat, STATX_BASIC_STATS); +} +static inline int vfs_fstatat(int dfd, const char __user *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); +} +static inline int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0); +} + + extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); @@ -2918,7 +2968,7 @@ extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, struct dir_context *); extern int simple_setattr(struct dentry *, struct iattr *); -extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h index a1e8277120c7..c46eab5bc893 100644 --- a/include/linux/fsl-diu-fb.h +++ b/include/linux/fsl-diu-fb.h @@ -73,7 +73,7 @@ struct diu_ad { /* Word 0(32-bit) in DDR memory */ /* __u16 comp; */ /* __u16 pixel_s:2; */ -/* __u16 pallete:1; */ +/* __u16 palette:1; */ /* __u16 red_c:2; */ /* __u16 green_c:2; */ /* __u16 blue_c:2; */ @@ -142,7 +142,7 @@ struct diu_ad { struct diu { __be32 desc[3]; __be32 gamma; - __be32 pallete; + __be32 palette; __be32 cursor; __be32 curs_pos; __be32 diu_mode; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 487246546ebe..e6e689b5569e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -16,6 +16,7 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> +#include <linux/user_namespace.h> /* * IN_* 
from inotfy.h lines up EXACTLY with FS_*, this is so we can easily @@ -170,7 +171,7 @@ struct fsnotify_group { struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; - struct user_struct *user; + struct ucounts *ucounts; } inotify_data; #endif #ifdef CONFIG_FANOTIFY diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fe0b6295ab5..db373b9d3223 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void) #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, - unsigned migratetype); + unsigned migratetype, gfp_t gfp_mask); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); #endif diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index fb0fde686cb1..2484b2fcc6eb 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -135,10 +135,24 @@ int desc_to_gpio(const struct gpio_desc *desc); struct fwnode_handle; struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, - const char *propname); -struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, - const char *con_id, - struct fwnode_handle *child); + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label); +/* FIXME: delete this helper when users are switched over */ +static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + const char *con_id, struct fwnode_handle *child) +{ + return devm_fwnode_get_index_gpiod_from_child(dev, con_id, + 0, child, + GPIOD_ASIS, + "?"); +} + #else /* CONFIG_GPIOLIB */ static inline int gpiod_count(struct device *dev, const char *con_id) @@ -411,20 +425,45 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) /* Child properties interface */ struct fwnode_handle; -static inline struct gpio_desc *fwnode_get_named_gpiod( - struct fwnode_handle *fwnode, const char *propname) +static inline +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) { return ERR_PTR(-ENOSYS); } -static inline struct gpio_desc *devm_get_gpiod_from_child( - struct device *dev, const char *con_id, struct fwnode_handle *child) +static inline +struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, + const char *con_id, int index, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +/* FIXME: delete this when all users are switched over */ +static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + const char *con_id, struct fwnode_handle *child) { return ERR_PTR(-ENOSYS); } #endif /* CONFIG_GPIOLIB */ +static inline +struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, + const char *con_id, + struct fwnode_handle *child, + enum gpiod_flags flags, + const char *label) +{ + return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child, + flags, label); +} + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); diff --git a/include/linux/hrtimer.h 
b/include/linux/hrtimer.h index e52b427223ba..249e579ecd4c 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -19,7 +19,6 @@ #include <linux/ktime.h> #include <linux/init.h> #include <linux/list.h> -#include <linux/wait.h> #include <linux/percpu.h> #include <linux/timer.h> #include <linux/timerqueue.h> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 97e478d6b690..a3762d49ba39 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); +extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pud_t *dst_pud, pud_t *src_pud, unsigned long addr, + struct vm_area_struct *vma); + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); +#else +static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) +{ +} +#endif + extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, extern int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr); +extern int zap_huge_pud(struct mmu_gather *tlb, + struct vm_area_struct *vma, + pud_t *pud, unsigned long addr); extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); @@ -26,13 +41,16 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); -int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *, - pfn_t pfn, bool write); +int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, pfn_t pfn, bool write); +int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, pfn_t pfn, bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, + TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, @@ -57,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #ifdef CONFIG_TRANSPARENT_HUGEPAGE -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags); - #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) +#define HPAGE_PUD_SHIFT PUD_SHIFT +#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) +#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) + extern bool is_vma_temporary_stack(struct vm_area_struct *vma); #define transparent_hugepage_enabled(__vma) \ @@ -117,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page); +void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, + unsigned long address); + +#define 
split_huge_pud(__vma, __pud, __address) \ + do { \ + pud_t *____pud = (__pud); \ + if (pud_trans_huge(*____pud) \ + || pud_devmap(*____pud)) \ + __split_huge_pud(__vma, __pud, __address); \ + } while (0) + extern int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice); extern void vma_adjust_trans_huge(struct vm_area_struct *vma, @@ -125,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next); extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); +extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma); /* mmap_sem must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) @@ -135,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, else return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); + if (pud_trans_huge(*pud) || pud_devmap(*pud)) + return __pud_trans_huge_lock(pud, vma); + else + return NULL; +} static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) @@ -142,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page) return 1; } +struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, int flags); +struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, int flags); + extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -156,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) return is_huge_zero_page(pmd_page(pmd)); } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + struct page *mm_get_huge_zero_page(struct mm_struct *mm); void mm_put_huge_zero_page(struct mm_struct *mm); @@ -166,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm); #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) + #define hpage_nr_pages(x) 1 #define transparent_hugepage_enabled(__vma) 0 @@ -194,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, static inline void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct page *page) {} +#define split_huge_pud(__vma, __pmd, __address) \ + do { } while (0) + static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -211,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, { return NULL; } +static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, + struct vm_area_struct *vma) +{ + return NULL; +} static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd) { @@ -222,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline bool is_huge_zero_pud(pud_t pud) +{ + return false; +} + static inline void mm_put_huge_zero_page(struct mm_struct *mm) { return; @@ -232,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, { return NULL; } + +static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud, int flags) +{ + return NULL; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ diff --git 
a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 48c76d612d40..503099d8aada 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -65,7 +65,8 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, - unsigned long *, unsigned long *, long, unsigned int); + unsigned long *, unsigned long *, long, unsigned int, + int *); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *); void __unmap_hugepage_range_final(struct mmu_gather *tlb, @@ -81,6 +82,11 @@ void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); @@ -131,7 +137,7 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } -#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; }) +#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) static inline void hugetlb_report_meminfo(struct seq_file *m) @@ -149,6 +155,8 @@ static inline void hugetlb_show_meminfo(void) #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) +#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) #define huge_pte_offset(mm, address) 0 static inline int dequeue_hwpoisoned_huge_page(struct page *page) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 7b23a3316dcb..6b183521c616 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -30,6 +30,7 @@ #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ #include <linux/mutex.h> +#include <linux/rtmutex.h> #include <linux/irqdomain.h> /* for Host Notify IRQ */ #include <linux/of.h> /* for struct device_node */ #include <linux/swab.h> /* for swab16 */ @@ -283,6 +284,7 @@ enum i2c_slave_event { extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); extern int i2c_slave_unregister(struct i2c_client *client); +extern bool i2c_detect_slave_mode(struct device *dev); static inline int i2c_slave_event(struct i2c_client *client, enum i2c_slave_event event, u8 *val) diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h deleted file mode 100644 index f0bcc38bbb97..000000000000 --- a/include/linux/i2c/mpr121_touchkey.h +++ /dev/null @@ -1,20 +0,0 @@ -/* Header file for Freescale MPR121 Capacitive Touch Sensor */ - -#ifndef _MPR121_TOUCHKEY_H -#define _MPR121_TOUCHKEY_H - -/** - * struct mpr121_platform_data - platform data for mpr121 sensor - * @keymap: pointer to array of KEY_* values representing keymap - * @keymap_size: size of the keymap - * @wakeup: configure the button as a wake-up source - * @vdd_uv: VDD voltage in uV - */ -struct mpr121_platform_data { - const unsigned short *keymap; 
- unsigned int keymap_size; - bool wakeup; - int vdd_uv; -}; - -#endif /* _MPR121_TOUCHKEY_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 3c01b89aed67..bf70b3ef0a07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -12,47 +12,29 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include <linux/types.h> -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/rcupdate.h> +#include <linux/radix-tree.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_next; +}; /* - * Using 6 bits at each layer allows us to allocate 7 layers out of each page. - * 8 bits only gave us 3 layers out of every pair of pages, which is less - * efficient except for trees with a largest element between 192-255 inclusive. + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. */ -#define IDR_BITS 6 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - int layer; /* distance from leaf */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - union { - /* A zero bit means "space here" */ - DECLARE_BITMAP(bitmap, IDR_SIZE); - struct rcu_head rcu_head; - }; -}; +#define IDR_FREE 0 -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - int layers; /* only valid w/o concurrent changes */ - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; - int id_free_cnt; - struct idr_layer *id_free; -}; +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) -#define IDR_INIT(name) \ +#define IDR_INIT \ { \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDR(name) struct idr name = IDR_INIT /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -62,9 +44,9 @@ struct idr { * idr_alloc_cyclic() if it is free (otherwise the search will start from * this position). */ -static inline unsigned int idr_get_cursor(struct idr *idr) +static inline unsigned int idr_get_cursor(const struct idr *idr) { - return READ_ONCE(idr->cur); + return READ_ONCE(idr->idr_next); } /** @@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) */ static inline void idr_set_cursor(struct idr *idr, unsigned int val) { - WRITE_ONCE(idr->cur, val); + WRITE_ONCE(idr->idr_next, val); } /** @@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -/* - * This is what we export. 
- */ - -void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); -int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t); +int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); +int idr_for_each(const struct idr *, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); -bool idr_is_empty(struct idr *idp); +void *idr_get_next(struct idr *, int *nextid); +void *idr_replace(struct idr *, void *, int id); +void idr_destroy(struct idr *); + +static inline void *idr_remove(struct idr *idr, int id) +{ + return radix_tree_delete_item(&idr->idr_rt, id, NULL); +} + +static inline void idr_init(struct idr *idr) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_next = 0; +} + +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} /** * idr_preload_end - end preload section started with idr_preload() @@ -137,19 +128,14 @@ static inline void idr_preload_end(void) * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. */ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(const struct idr *idr, int id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); - - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); - - return idr_find_slowpath(idr, id); + return radix_tree_lookup(&idr->idr_rt, id); } /** * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * after normal termination @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) /** - * idr_for_each_entry - continue iteration over an idr's elements of a given type - * @idp: idr handle + * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * Continue to iterate over list of given type, continuing after * the current position. */ -#define idr_for_each_entry_continue(idp, entry, id) \ - for ((entry) = idr_get_next((idp), &(id)); \ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idp), &(id))) + ++id, (entry) = idr_get_next((idr), &(id))) /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. - * - * IDA_BITMAP_LONGS is calculated to be one less to accommodate - * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
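The reworked IDR above keeps the established calling convention; a minimal usage sketch against the declarations just shown (the object type, lock and GFP choices are illustrative assumptions, not part of this patch):

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static int my_obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate radix-tree nodes */
	spin_lock(&my_idr_lock);
	/* start == 0; end <= 0 is treated as "no upper limit" */
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_idr_lock);
	idr_preload_end();

	return id < 0 ? id : 0;
}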
*/ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { - long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + struct ida { - struct idr idr; - struct ida_bitmap *free_bitmap; + struct radix_tree_root ida_rt; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) +#define IDA_INIT { \ + .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + /** * ida_get_new - allocate new ID * @ida: idr handle @@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } -static inline bool ida_is_empty(struct ida *ida) +static inline bool ida_is_empty(const struct ida *ida) { - return idr_is_empty(&ida->idr); + return radix_tree_empty(&ida->ida_rt); } - -void __init idr_init_cache(void); - #endif /* __IDR_H__ */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a85d61f7614..91d9049f0039 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,8 +12,10 @@ #include <linux/securebits.h> #include <linux/seqlock.h> #include <linux/rbtree.h> +#include <linux/sched/autogroup.h> #include <net/net_namespace.h> #include <linux/sched/rt.h> +#include <linux/mm_types.h> #include <asm/thread_info.h> @@ -149,8 +151,6 @@ extern struct group_info init_groups; extern struct cred init_cred; -extern struct task_group root_task_group; - #ifdef CONFIG_CGROUP_SCHED # define INIT_CGROUP_SCHED(tsk) \ .sched_task_group = &root_task_group, diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 27e06acc509a..37b04a0fdea4 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -80,24 +80,9 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, unsigned int rows, unsigned int cols, unsigned short *keymap, struct input_dev *input_dev); +int matrix_keypad_parse_properties(struct device *dev, + unsigned int *rows, unsigned int *cols); -#ifdef CONFIG_OF -/** - * matrix_keypad_parse_of_params() - Read parameters from matrix-keypad node - * - * @dev: Device containing of_node - * @rows: Returns number of matrix rows - * @cols: Returns number of matrix columns - * @return 0 if OK, <0 on error - */ -int matrix_keypad_parse_of_params(struct device *dev, - unsigned int *rows, unsigned int *cols); -#else -static inline int matrix_keypad_parse_of_params(struct device *dev, - unsigned int *rows, unsigned int *cols) -{ - return -ENOSYS; -} -#endif /* CONFIG_OF */ +#define matrix_keypad_parse_of_params matrix_keypad_parse_properties #endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h deleted file mode 
100644 index e71a85dc2cbd..000000000000 --- a/include/linux/input/tca8418_keypad.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * TCA8418 keypad platform support - * - * Copyright (C) 2011 Fuel7, Inc. All rights reserved. - * - * Author: Kyle Manna <kyle.manna@fuel7.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. - * - * If you can't comply with GPLv2, alternative licensing terms may be - * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary - * alternative licensing inquiries. - */ - -#ifndef _TCA8418_KEYPAD_H -#define _TCA8418_KEYPAD_H - -#include <linux/types.h> -#include <linux/input/matrix_keypad.h> - -#define TCA8418_I2C_ADDR 0x34 -#define TCA8418_NAME "tca8418_keypad" - -struct tca8418_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - unsigned rows; - unsigned cols; - bool rep; - bool irq_is_gpio; -}; - -#endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 891459caa278..7291810067eb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -79,8 +79,7 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); -int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, - const struct iomap_ops *ops); +int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, const struct iomap_ops *ops); diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 1c30014ed176..d29e1e21bf3f 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <linux/types.h> -#include <linux/hrtimer.h> +#include <linux/ktime.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/io.h> diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 78c5d5ae3857..f1045b2c6a00 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -100,7 +100,7 @@ struct ipmi_user_hndl { /* Create a new user of the IPMI layer on the given interface number. 
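With the const qualifier added to ipmi_create_user() just below, a user's handler table can now live in rodata. A hedged sketch; the callback and the .ipmi_recv_hndl field follow the usual struct ipmi_user_hndl layout, and interface number 0 is an arbitrary example:

static void my_recv_handler(struct ipmi_recv_msg *msg,
			    void *handler_data);	/* defined elsewhere */

static const struct ipmi_user_hndl my_hndl = {
	.ipmi_recv_hndl = my_recv_handler,
};

static int my_ipmi_attach(void)
{
	ipmi_user_t user;	/* real code would keep this handle around */

	return ipmi_create_user(0, &my_hndl, NULL, &user);
}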
*/ int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index b63d6b7b0db0..8e06d758ee48 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -89,11 +89,17 @@ extern bool static_key_initialized; struct static_key { atomic_t enabled; -/* Set lsb bit to 1 if branch is default true, 0 ot */ - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct static_key_mod *next; -#endif +/* + * bit 0 => 1 if key is initially true + * 0 if initially false + * bit 1 => 1 if points to struct static_key_mod + * 0 if points to struct jump_entry + */ + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; }; #else @@ -118,9 +124,10 @@ struct module; #ifdef HAVE_JUMP_LABEL -#define JUMP_TYPE_FALSE 0UL -#define JUMP_TYPE_TRUE 1UL -#define JUMP_TYPE_MASK 1UL +#define JUMP_TYPE_FALSE 0UL +#define JUMP_TYPE_TRUE 1UL +#define JUMP_TYPE_LINKED 2UL +#define JUMP_TYPE_MASK 3UL static __always_inline bool static_key_false(struct static_key *key) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 820c0ad54a01..ceb3fe78a0d3 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -1,7 +1,6 @@ #ifndef _LINUX_KASAN_H #define _LINUX_KASAN_H -#include <linux/sched.h> #include <linux/types.h> struct kmem_cache; @@ -30,16 +29,10 @@ static inline void *kasan_mem_to_shadow(const void *addr) } /* Enable reporting bugs after kasan_disable_current() */ -static inline void kasan_enable_current(void) -{ - current->kasan_depth++; -} +extern void kasan_enable_current(void); /* Disable reporting bugs for current task */ -static inline void kasan_disable_current(void) -{ - current->kasan_depth--; -} +extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); @@ -52,7 +45,7 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags); void kasan_cache_shrink(struct kmem_cache *cache); -void kasan_cache_destroy(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); @@ -98,7 +91,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache, size_t *size, unsigned long *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} -static inline void kasan_cache_destroy(struct kmem_cache *cache) {} +static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 8f2e059e4d45..4d748603e818 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -8,7 +8,7 @@ /* * The use of "&&" / "||" is limited in certain expressions. - * The followings enable to calculate "and" / "or" with macro expansion only. + * The following enable to calculate "and" / "or" with macro expansion only. 
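For the two-bit scheme introduced in the jump_label.h hunk above, decoding looks roughly like this (helper names are illustrative; the real accessors live in the jump_label core, not in this header):

static inline bool static_key_initially_true(const struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;		/* bit 0 */
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	/* bit 1 set would mean the union holds a static_key_mod chain */
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}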
*/ #define __and(x, y) ___and(x, y) #define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cb09238f6d32..4c26dc3a8295 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -100,16 +100,18 @@ ) /* - * Divide positive or negative dividend by positive divisor and round - * to closest integer. Result is undefined for negative divisors and - * for negative dividends if the divisor variable type is unsigned. + * Divide positive or negative dividend by positive or negative divisor + * and round to closest integer. Result is undefined for negative + * divisors if the dividend variable type is unsigned and for negative + * dividends if the divisor variable type is unsigned. */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ - ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ + ((typeof(divisor))-1) > 0 || \ + (((__x) > 0) == ((__d) > 0))) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 7056238fd9f5..a9b11b8d06f2 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -46,6 +46,7 @@ enum kernfs_node_flag { KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, KERNFS_EMPTY_DIR = 0x1000, + KERNFS_HAS_RELEASE = 0x2000, }; /* @flags for kernfs_create_root() */ @@ -175,6 +176,7 @@ struct kernfs_open_file { /* published fields */ struct kernfs_node *kn; struct file *file; + struct seq_file *seq_file; void *priv; /* private fields, do not use outside kernfs proper */ @@ -185,12 +187,20 @@ struct kernfs_open_file { char *prealloc_buf; size_t atomic_write_len; - bool mmapped; + bool mmapped:1; + bool released:1; const struct vm_operations_struct *vm_ops; }; struct kernfs_ops { /* + * Optional open/release methods. Both are called with + * @of->seq_file populated. + */ + int (*open)(struct kernfs_open_file *of); + void (*release)(struct kernfs_open_file *of); + + /* * Read is handled by either seq_file or raw_read(). * * If seq_show() is present, seq_file path is active. Other seq diff --git a/include/linux/key.h b/include/linux/key.h index 722914798f37..e45212f2777e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -354,7 +354,10 @@ static inline bool key_is_instantiated(const struct key *key) !test_bit(KEY_FLAG_NEGATIVE, &key->flags); } -#define rcu_dereference_key(KEY) \ +#define dereference_key_rcu(KEY) \ + (rcu_dereference((KEY)->payload.rcu_data0)) + +#define dereference_key_locked(KEY) \ (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 1e032a1ddb3e..5d9a400af509 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -1,7 +1,8 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include <linux/sched.h> /* MMF_VM_HUGEPAGE */ +#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 16ddfb8b304a..c328e4f7dcad 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -29,7 +29,7 @@ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes.
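Worked examples for the DIV_ROUND_CLOSEST() change earlier in this hunk series; with both operands signed, the rounding branch is now picked by whether the signs agree (values computed by hand, wrapper function is illustrative):

static void div_round_closest_examples(void)
{
	int a = DIV_ROUND_CLOSEST(17, 5);	/* signs agree:  (17 + 2) / 5     ==  3 */
	int b = DIV_ROUND_CLOSEST(-17, 5);	/* signs differ: (-17 - 2) / 5    == -3 */
	int c = DIV_ROUND_CLOSEST(17, -5);	/* signs differ: (17 - (-2)) / -5 == -3 */
	int d = DIV_ROUND_CLOSEST(-17, -5);	/* signs agree:  (-17 + (-2)) / -5 == 3 */

	(void)a; (void)b; (void)c; (void)d;
}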
*/ -#include <linux/compiler.h> /* for __kprobes */ +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/list.h> #include <linux/notifier.h> @@ -40,9 +40,9 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <asm/kprobes.h> #ifdef CONFIG_KPROBES -#include <asm/kprobes.h> /* kprobe_status settings */ #define KPROBE_HIT_ACTIVE 0x00000001 @@ -51,6 +51,7 @@ #define KPROBE_HIT_SSDONE 0x00000008 #else /* CONFIG_KPROBES */ +#include <asm-generic/kprobes.h> typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; @@ -509,18 +510,4 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif -#ifdef CONFIG_KPROBES -/* - * Blacklist ganerating macro. Specify functions which is not probed - * by using this macro. - */ -#define __NOKPROBE_SYMBOL(fname) \ -static unsigned long __used \ - __attribute__((section("_kprobe_blacklist"))) \ - _kbl_addr_##fname = (unsigned long)fname; -#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) -#else -#define NOKPROBE_SYMBOL(fname) -#endif - #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 481c8c4627ca..e1cfda4bee58 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -12,6 +12,7 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> +#include <linux/sched/coredump.h> struct stable_node; struct mem_cgroup; diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 8458c5351e56..77e7af32543f 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -70,6 +70,8 @@ struct nd_cmd_desc { struct nd_interleave_set { u64 cookie; + /* compatibility with initial buggy Linux implementation */ + u64 altcookie; }; struct nd_mapping_desc { diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c15373894a42..b37dee3acaba 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -355,7 +355,8 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp) static inline int nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) { - return fl1->fl_pid == fl2->fl_pid + return file_inode(fl1->fl_file) == file_inode(fl2->fl_file) + && fl1->fl_pid == fl2->fl_pid && fl1->fl_owner == fl2->fl_owner && fl1->fl_start == fl2->fl_start && fl1->fl_end == fl2->fl_end diff --git a/include/linux/log2.h b/include/linux/log2.h index ef3d4f67118c..c373295f359f 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -16,12 +16,6 @@ #include <linux/bitops.h> /* - * deal with unrepresentable constant logarithms - */ -extern __attribute__((const, noreturn)) -int ____ilog2_NaN(void); - -/* * non-constant log of base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented * more efficiently than using fls() and fls64() @@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 1 ? ____ilog2_NaN() : \ + (n) < 2 ? 0 : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - (n) & (1ULL << 1) ? 1 : \ - (n) & (1ULL << 0) ? 0 : \ - ____ilog2_NaN() \ - ) : \ + 1 ) : \ (sizeof(n) <= 4) ? 
\ __ilog2_u32(n) : \ __ilog2_u64(n) \ diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 6b784c59f321..394e3d9213b8 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -1,87 +1,648 @@ -#ifndef __LZ4_H__ -#define __LZ4_H__ -/* - * LZ4 Kernel Interface +/* LZ4 Kernel Interface * * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. + * + * This file is based on the original header file + * for LZ4 - Fast LZ compression algorithm. + * + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2016, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * You can contact the author at : + * - LZ4 homepage : http://www.lz4.org + * - LZ4 source repository : https://github.com/lz4/lz4 */ -#define LZ4_MEM_COMPRESS (16384) -#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *))) +#ifndef __LZ4_H__ +#define __LZ4_H__ + +#include <linux/types.h> +#include <linux/string.h> /* memset, memcpy */ + +/*-************************************************************************ + * CONSTANTS + **************************************************************************/ /* - * lz4_compressbound() - * Provides the maximum size that LZ4 may output in a "worst case" scenario - * (input data not compressible) + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes + * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ -static inline size_t lz4_compressbound(size_t isize) -{ - return isize + (isize / 255) + 16; -} +#define LZ4_MEMORY_USAGE 14 + +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) (\ + (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \ + ? 
0 \ + : (isize) + ((isize)/255) + 16) + +#define LZ4_ACCELERATION_DEFAULT 1 +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) + +#define LZ4HC_MIN_CLEVEL 3 +#define LZ4HC_DEFAULT_CLEVEL 9 +#define LZ4HC_MAX_CLEVEL 16 + +#define LZ4HC_DICTIONARY_LOGSIZE 16 +#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE) +#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1) +#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1) +#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG) +#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1) + +/*-************************************************************************ + * STREAMING CONSTANTS AND STRUCTURES + **************************************************************************/ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) + +#define LZ4_STREAMHCSIZE 262192 +#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t)) + +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \ + sizeof(unsigned long long)) /* - * lz4_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); - - /* - * lz4hc_compress() - * src : source address of the original data - * src_len : size of the original data - * dst : output buffer address of the compressed data - * This requires 'dst' of size LZ4_COMPRESSBOUND. - * dst_len : is the output size, which is returned after compress done - * workmem : address of the working memory. - * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer and workmem must be already allocated with - * the defined size. - */ -int lz4hc_compress(const unsigned char *src, size_t src_len, - unsigned char *dst, size_t *dst_len, void *wrkmem); + * LZ4_stream_t - information structure to track an LZ4 stream. + */ +typedef struct { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint32_t initCheck; + const uint8_t *dictionary; + uint8_t *bufferStart; + uint32_t dictSize; +} LZ4_stream_t_internal; +typedef union { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} LZ4_stream_t; /* - * lz4_decompress() - * src : source address of the compressed data - * src_len : is the input size, whcih is returned after decompress done - * dest : output buffer address of the decompressed data - * actual_dest_len: is the size of uncompressed data, supposing it's known - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer must be already allocated. - * slightly faster than lz4_decompress_unknownoutputsize() - */ -int lz4_decompress(const unsigned char *src, size_t *src_len, - unsigned char *dest, size_t actual_dest_len); + * LZ4_streamHC_t - information structure to track an LZ4HC stream. 
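A worked instance of the constants above (the 4 KiB figure is an arbitrary example):

/* LZ4_COMPRESSBOUND(4096) = 4096 + 4096/255 + 16 = 4096 + 16 + 16 = 4128 */
static char lz4_dst[LZ4_COMPRESSBOUND(4096)];
/* fixed-size compression state; equals LZ4_STREAMSIZE via the
 * SIZE OF STATE block below */
static char lz4_wrkmem[LZ4_MEM_COMPRESS];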
+ */ +typedef struct { + unsigned int hashTable[LZ4HC_HASHTABLESIZE]; + unsigned short chainTable[LZ4HC_MAXD]; + /* next block to continue on current prefix */ + const unsigned char *end; + /* All index relative to this position */ + const unsigned char *base; + /* alternate base for extDict */ + const unsigned char *dictBase; + /* below that point, need extDict */ + unsigned int dictLimit; + /* below that point, no more dict */ + unsigned int lowLimit; + /* index from which to continue dict update */ + unsigned int nextToUpdate; + unsigned int compressionLevel; +} LZ4HC_CCtx_internal; +typedef union { + size_t table[LZ4_STREAMHCSIZE_SIZET]; + LZ4HC_CCtx_internal internal_donotuse; +} LZ4_streamHC_t; /* - * lz4_decompress_unknownoutputsize() - * src : source address of the compressed data - * src_len : is the input size, therefore the compressed size - * dest : output buffer address of the decompressed data - * dest_len: is the max size of the destination buffer, which is - * returned with actual size of decompressed data after - * decompress done - * return : Success if return 0 - * Error if return (< 0) - * note : Destination buffer must be already allocated. - */ -int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, - unsigned char *dest, size_t *dest_len); + * LZ4_streamDecode_t - information structure to track an + * LZ4 stream during decompression. + * + * init this structure using LZ4_setStreamDecode (or memset()) before first use + */ +typedef struct { + const uint8_t *externalDict; + size_t extDictSize; + const uint8_t *prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; +typedef union { + unsigned long long table[LZ4_STREAMDECODESIZE_U64]; + LZ4_streamDecode_t_internal internal_donotuse; +} LZ4_streamDecode_t; + +/*-************************************************************************ + * SIZE OF STATE + **************************************************************************/ +#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE +#define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE + +/*-************************************************************************ + * Compression Functions + **************************************************************************/ + +/** + * LZ4_compressBound() - Max. output size in worst case scenarios + * @isize: Size of the input data + * + * Return: Max. size LZ4 may output in a "worst case" scenario + * (data not compressible) + */ +static inline int LZ4_compressBound(size_t isize) +{ + return LZ4_COMPRESSBOUND(isize); +} + +/** + * LZ4_compress_default() - Compress data from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Compresses 'sourceSize' bytes from buffer 'source' + * into already allocated 'dest' buffer of size 'maxOutputSize'. + * Compression is guaranteed to succeed if + * 'maxOutputSize' >= LZ4_compressBound(inputSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'source' into a more limited 'dest' budget, + * compression stops *immediately*, and the function result is zero. + * As a consequence, 'dest' content is not valid.
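A hedged call sketch for LZ4_compress_default() as documented here; the wrapper name, vmalloc() allocation and error policy are illustrative assumptions:

static int my_lz4_compress(const char *src, int src_len,
			   char *dst, int dst_cap)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* <linux/vmalloc.h> */
	int csize;

	if (!wrkmem)
		return -ENOMEM;
	csize = LZ4_compress_default(src, dst, src_len, dst_cap, wrkmem);
	vfree(wrkmem);
	/* 0 means the 'dst' budget was too small; contents are not valid */
	return csize ? csize : -ENOSPC;
}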
+ * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_default(const char *source, char *dest, int inputSize, + int maxOutputSize, void *wrkmem); + +/** + * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxOutputSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Same as LZ4_compress_default(), but allows selecting an "acceleration" + * factor. The larger the acceleration value, the faster the algorithm, + * but also the lesser the compression. It's a trade-off. It can be fine tuned, + * with each successive value providing roughly +~3% to speed. + * An acceleration value of "1" is the same as regular LZ4_compress_default(). + * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1. + * + * Return: Number of bytes written into buffer 'dest' + * (necessarily <= maxOutputSize) or 0 if compression fails + */ +int LZ4_compress_fast(const char *source, char *dest, int inputSize, + int maxOutputSize, int acceleration, void *wrkmem); + +/** + * LZ4_compress_destSize() - Compress as much data as possible + * from source to dest + * @source: source address of the original data + * @dest: output buffer address of the compressed data + * @sourceSizePtr: will be modified to indicate how many bytes were read + * from 'source' to fill 'dest'. New value is necessarily <= old value. + * @targetDestSize: Size of buffer 'dest' which must be already allocated + * @wrkmem: address of the working memory. + * This requires 'workmem' of LZ4_MEM_COMPRESS. + * + * Reverse the logic, by compressing as much data as possible + * from 'source' buffer into already allocated buffer 'dest' + * of size 'targetDestSize'. + * This function either compresses the entire 'source' content into 'dest' + * if it's large enough, or fills 'dest' buffer completely with as much data as + * possible from 'source'. + * + * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize) + * or 0 if compression fails + */ +int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr, + int targetDestSize, void *wrkmem); + +/*-************************************************************************ + * Decompression Functions + **************************************************************************/ + +/** + * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * + * Decompresses data from 'source' into 'dest'. + * This function fully respects memory boundaries for properly formed + * compressed data. + * It is a bit faster than LZ4_decompress_safe(). + * However, it does not provide any protection against intentionally + * modified data stream (malicious input). + * Use this function in trusted environment only + * (data to decode comes from a trusted source). + * + * Return: number of bytes read from the source buffer + * or a negative result if decompression fails.
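An illustrative rule of thumb for the pair of decompressors documented here: reserve LZ4_decompress_fast() for trusted, in-kernel data and use the bounds-checked variant everywhere else (the wrapper name is an assumption):

static int my_lz4_decompress(const char *in, int in_len,
			     char *out, int out_cap)
{
	/* untrusted input: the safe variant never reads or writes
	 * outside the given buffers and returns < 0 on malformed data */
	int n = LZ4_decompress_safe(in, out, in_len, out_cap);

	return n < 0 ? -EINVAL : n;
}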
+ */ +int LZ4_decompress_fast(const char *source, char *dest, int originalSize); + +/** + * LZ4_decompress_safe() - Decompression protected against buffer overflow + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * + * Decompresses data from 'source' into 'dest'. + * If the source stream is detected malformed, the function will + * stop decoding and return a negative result. + * This function is protected against buffer overflow exploits, + * including malicious data packets. It never writes outside output buffer, + * nor reads outside input buffer. + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, + int maxDecompressedSize); + +/** + * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize' + * at position 'source' into buffer 'dest' + * @source: source address of the compressed data + * @dest: output buffer address of the decompressed data which must be + * already allocated + * @compressedSize: is the precise full size of the compressed block. + * @targetOutputSize: the decompression operation will try + * to stop as soon as 'targetOutputSize' has been reached + * @maxDecompressedSize: is the size of destination buffer + * + * This function decompresses a compressed block of size 'compressedSize' + * at position 'source' into destination buffer 'dest' + * of size 'maxDecompressedSize'. + * The function tries to stop decompressing operation as soon as + * 'targetOutputSize' has been reached, reducing decompression time. + * This function never writes outside of output buffer, + * and never reads outside of input buffer. + * It is therefore protected against malicious data packets. + * + * Return: the number of bytes decoded in the destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + * + */ +int LZ4_decompress_safe_partial(const char *source, char *dest, + int compressedSize, int targetOutputSize, int maxDecompressedSize); + +/*-************************************************************************ + * LZ4 HC Compression + **************************************************************************/ + +/** + * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm + * @src: source address of the original data + * @dst: output buffer address of the compressed data + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @dstCapacity: full or partial size of buffer 'dst', + * which must be already allocated + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * @wrkmem: address of the working memory. + * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS. + * + * Compress data from 'src' into 'dst', using the more powerful + * but slower "HC" algorithm. Compression is guaranteed to succeed if + * `dstCapacity >= LZ4_compressBound(srcSize)` + * + * Return : the number of bytes written into 'dst' or 0 if compression fails.
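A one-shot sketch for LZ4_compress_HC() declared just below; the level and allocation strategy are illustrative (note the larger LZ4HC_MEM_COMPRESS state):

static int my_lz4hc_compress(const char *src, int src_len,
			     char *dst, int dst_cap)
{
	void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);
	int n;

	if (!wrkmem)
		return -ENOMEM;
	n = LZ4_compress_HC(src, dst, src_len, dst_cap,
			    LZ4HC_DEFAULT_CLEVEL, wrkmem);
	vfree(wrkmem);
	return n ? n : -ENOSPC;
}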
+ */ +int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity, + int compressionLevel, void *wrkmem); + +/** + * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure + * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure + * @compressionLevel: Recommended values are between 4 and 9, although any + * value between 1 and LZ4HC_MAX_CLEVEL will work. + * Values >LZ4HC_MAX_CLEVEL behave the same as 16. + * + * An LZ4_streamHC_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_streamHC_t` structure + * and start a new compression. + */ +void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel); + +/** + * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC + * @streamHCPtr: pointer to the LZ4HC_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4HC_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_compress_HC_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary using the HC algorithm + * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * + * These functions compress data in successive blocks of any size, using + * previous blocks as dictionary. One key assumption is that previous + * blocks (up to 64 KB) remain read-accessible while + * compressing next blocks. There is an exception for ring buffers, + * which can be smaller than 64 KB. + * Ring buffers scenario is automatically detected and handled by + * LZ4_compress_HC_continue(). + * Before starting compression, state must be properly initialized, + * using LZ4_resetStreamHC(). + * A first "fictional block" can then be designated as + * initial dictionary, using LZ4_loadDictHC() (Optional). + * Then, use LZ4_compress_HC_continue() + * to compress each successive block. Previous memory blocks + * (including initial dictionary when present) must remain accessible + * and unmodified during compression. + * 'dst' buffer should be sized to handle worst case scenarios, using + * LZ4_compressBound(), to ensure operation success. + * If, for any reason, previous data blocks can't be preserved unmodified + * in memory during next compression block, + * you must save it to a safer memory space, using LZ4_saveDictHC(). + * Return value of LZ4_saveDictHC() is the size of dictionary + * effectively saved into 'safeBuffer'. 
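The block loop that the description above implies, as a hedged sketch (block layout and error policy are illustrative; earlier source blocks must stay readable while later ones are compressed):

static int my_hc_compress_blocks(LZ4_streamHC_t *ctx, const char *blk[],
				 int nblk, int blk_len,
				 char *out, int out_cap)
{
	int i, n, total = 0;

	LZ4_resetStreamHC(ctx, LZ4HC_DEFAULT_CLEVEL);
	for (i = 0; i < nblk; i++) {
		/* previous blocks (up to 64 KB back) serve as dictionary */
		n = LZ4_compress_HC_continue(ctx, blk[i], out + total,
					     blk_len, out_cap - total);
		if (!n)
			return -ENOSPC;
		total += n;
	}
	return total;
}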
+ * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src, + char *dst, int srcSize, int maxDstSize); + +/** + * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream + * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @maxDictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDictHC() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_HC_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= maxDictSize), + * or 0 if error. + */ +int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer, + int maxDictSize); + +/*-********************************************* + * Streaming Compression Functions + ***********************************************/ + +/** + * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure + * @LZ4_stream: pointer to the 'LZ4_stream_t' structure + * + * An LZ4_stream_t structure can be allocated once + * and re-used multiple times. + * Use this function to init an allocated `LZ4_stream_t` structure + * and start a new compression. + */ +void LZ4_resetStream(LZ4_stream_t *LZ4_stream); + +/** + * LZ4_loadDict() - Load a static dictionary into LZ4_stream + * @streamPtr: pointer to the LZ4_stream_t + * @dictionary: dictionary to load + * @dictSize: size of dictionary + * + * Use this function to load a static dictionary into LZ4_stream. + * Any previous data will be forgotten, only 'dictionary' + * will remain in memory. + * Loading a size of 0 is allowed. + * + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary, + int dictSize); + +/** + * LZ4_saveDict() - Save static dictionary from LZ4_stream + * @streamPtr: pointer to the 'LZ4_stream_t' structure + * @safeBuffer: buffer to save dictionary to, must be already allocated + * @dictSize: size of 'safeBuffer' + * + * If previously compressed data block is not guaranteed + * to remain available at its memory location, + * save it into a safer place (char *safeBuffer). + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call + * LZ4_compress_fast_continue(). + * + * Return : saved dictionary size in bytes (necessarily <= dictSize), + * or 0 if error. + */ +int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize); + +/** + * LZ4_compress_fast_continue() - Compress 'src' using data from previously + * compressed blocks as a dictionary + * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure + * @src: source address of the original data + * @dst: output buffer address of the compressed data, + * which must be already allocated + * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE + * @maxDstSize: full or partial size of buffer 'dest' + * which must be already allocated + * @acceleration: acceleration factor + * + * Compress buffer content 'src', using data from previously compressed blocks + * as dictionary to improve compression ratio. + * Important : Previous data blocks are assumed to still + * be present and unmodified ! 
+ * If maxDstSize >= LZ4_compressBound(srcSize), + * compression is guaranteed to succeed, and runs faster. + * + * Return: Number of bytes written into buffer 'dst' or 0 if compression fails + */ +int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src, + char *dst, int srcSize, int maxDstSize, int acceleration); + +/** + * LZ4_setStreamDecode() - Instruct where to find dictionary + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @dictionary: dictionary to use + * @dictSize: size of dictionary + * + * Use this function to instruct where to find the dictionary. + * Setting a size of 0 is allowed (same effect as reset). + * + * Return: 1 if OK, 0 if error + */ +int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, + const char *dictionary, int dictSize); + +/** + * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * + * This decoding function allows decompression of multiple blocks + * in "streaming" mode. + * Previously decoded blocks *must* remain available at the memory position + * where they were decoded (up to 64 KB). + * In the case of ring buffers, the decoding buffer must be either : + * - Exactly same size as encoding buffer, with same update rule + * (block boundaries at same positions) In which case, + * the decoding & encoding ring buffer can have any size, + * including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. + * It's the maximum size you intend to compress into a single block. + * In which case, encoding and decoding buffers do not need + * to be synchronized, and encoding ring buffer can have any size, + * including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be + * synchronized, and encoding ring buffer can have any size, + * including larger than decoding buffer. + * Whenever these conditions are not possible, save the last 64KB of decoded + * data into a safe buffer, and indicate where it is saved + * using LZ4_setStreamDecode(). + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int compressedSize, + int maxDecompressedSize); + +/** + * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode + * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * + * This decoding function allows decompression of multiple blocks + * in "streaming" mode.
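Decode-side counterpart for LZ4_decompress_safe_continue() declared above, sketched under the assumption that decoded output stays in place between calls (names illustrative):

static int my_decode_block(LZ4_streamDecode_t *sd, bool first,
			   const char *in, int in_len,
			   char *out, int out_cap)
{
	if (first)
		LZ4_setStreamDecode(sd, NULL, 0);	/* reset, no dictionary */
	/* 'out' must remain valid for the 64 KB back-reference window */
	return LZ4_decompress_safe_continue(sd, in, out, in_len, out_cap);
}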
+ * Previously decoded blocks *must* remain available at the memory position + * where they were decoded (up to 64 KB). + * In the case of ring buffers, the decoding buffer must be either : + * - Exactly same size as encoding buffer, with same update rule + * (block boundaries at same positions). In which case, + * the decoding & encoding ring buffer can have any size, + * including very small ones ( < 64 KB). + * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. + * maxBlockSize is implementation dependent. + * It's the maximum size you intend to compress into a single block. + * In which case, encoding and decoding buffers do not need + * to be synchronized, and encoding ring buffer can have any size, + * including small ones ( < 64 KB). + * - _At least_ 64 KB + 8 bytes + maxBlockSize. + * In which case, encoding and decoding buffers do not need to be + * synchronized, and encoding ring buffer can have any size, + * including larger than decoding buffer. + * Whenever these conditions are not possible, save the last 64KB of decoded + * data into a safe buffer, and indicate where it is saved + * using LZ4_setStreamDecode(). + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= originalSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, + const char *source, char *dest, int originalSize); + +/** + * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode() + * followed by LZ4_decompress_safe_continue() + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated + * @compressedSize: is the precise full size of the compressed block + * @maxDecompressedSize: is the size of 'dest' buffer + * @dictStart: pointer to the start of the dictionary in memory + * @dictSize: size of dictionary + * + * This decoding function works the same as + * a combination of LZ4_setStreamDecode() followed by + * LZ4_decompress_safe_continue(). + * It is stand-alone, and doesn't need a LZ4_streamDecode_t structure. + * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= maxDecompressedSize) + * or a negative result in case of error + */ +int LZ4_decompress_safe_usingDict(const char *source, char *dest, + int compressedSize, int maxDecompressedSize, const char *dictStart, + int dictSize); + +/** + * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode() + * followed by LZ4_decompress_fast_continue() + * @source: source address of the compressed data + * @dest: output buffer address of the uncompressed data + * which must be already allocated with 'originalSize' bytes + * @originalSize: is the original and therefore uncompressed size + * @dictStart: pointer to the start of the dictionary in memory + * @dictSize: size of dictionary + * + * This decoding function works the same as + * a combination of LZ4_setStreamDecode() followed by + * LZ4_decompress_fast_continue(). + * It is stand-alone, and doesn't need a LZ4_streamDecode_t structure.
+ * + * Return: number of bytes decompressed into destination buffer + * (necessarily <= originalSize) + * or a negative result in case of error + */ +int LZ4_decompress_fast_usingDict(const char *source, char *dest, + int originalSize, const char *dictStart, int dictSize); + #endif diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5b759c9acf97..bdfc65af4152 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -42,6 +42,7 @@ struct memblock_type { unsigned long max; /* size of the allocated array */ phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; + char *name; }; struct memblock { @@ -203,6 +204,7 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid); +unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn); /** * for_each_mem_pfn_range - early memory pfn range iterator diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 254698856b8f..5af377303880 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -253,6 +253,7 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; + struct list_head kmem_caches; #endif int last_scanned_node; @@ -829,6 +830,7 @@ void memcg_kmem_uncharge(struct page *page, int order); #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; +extern struct workqueue_struct *memcg_kmem_cache_wq; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); diff --git a/include/linux/memory.h b/include/linux/memory.h index 093607f90b91..b723a686fc10 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); extern int register_new_memory(int, struct mem_section *); -extern int memory_block_change_state(struct memory_block *mem, - unsigned long to_state, - unsigned long from_state_req); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 552cc1d61cc7..44412c9d26e1 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -45,7 +45,7 @@ enum abx500_adc_therm { * struct abx500_res_to_temp - defines one point in a temp to res curve. To * be used in battery packs that combine the identification resistor with a * NTC resistor. - * @temp: battery pack temperature in Celcius + * @temp: battery pack temperature in Celsius * @resist: NTC resistor net total resistance */ struct abx500_res_to_temp { diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h index 12a5b396921e..e63681eb6c62 100644 --- a/include/linux/mfd/abx500/ab8500-bm.h +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -279,7 +279,7 @@ enum bup_vch_sel { * struct res_to_temp - defines one point in a temp to res curve. To * be used in battery packs that combine the identification resistor with a * NTC resistor.
- * @temp: battery pack temperature in Celcius + * @temp: battery pack temperature in Celsius * @resist: NTC resistor net total resistance */ struct res_to_temp { @@ -290,7 +290,7 @@ struct res_to_temp { /** * struct batres_vs_temp - defines one point in a temp vs battery internal * resistance curve. - * @temp: battery pack temperature in Celcius + * @temp: battery pack temperature in Celsius * @resist: battery internal resistance in mOhm */ struct batres_vs_temp { diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index f848ee86a339..0d9a1ff38393 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -235,10 +235,20 @@ enum axp20x_variants { #define AXP22X_BATLOW_THRES1 0xe6 /* AXP288 specific registers */ +#define AXP288_POWER_REASON 0x02 +#define AXP288_BC_GLOBAL 0x2c +#define AXP288_BC_VBUS_CNTL 0x2d +#define AXP288_BC_USB_STAT 0x2e +#define AXP288_BC_DET_STAT 0x2f #define AXP288_PMIC_ADC_H 0x56 #define AXP288_PMIC_ADC_L 0x57 +#define AXP288_TS_ADC_H 0x58 +#define AXP288_TS_ADC_L 0x59 +#define AXP288_GP_ADC_H 0x5a +#define AXP288_GP_ADC_L 0x5b #define AXP288_ADC_TS_PIN_CTRL 0x84 -#define AXP288_PMIC_ADC_EN 0x84 +#define AXP288_RT_BATT_V_H 0xa0 +#define AXP288_RT_BATT_V_L 0xa1 /* Fuel Gauge */ #define AXP288_FG_RDC1_REG 0xba @@ -515,14 +525,10 @@ enum axp809_irqs { AXP809_IRQ_GPIO0_INPUT, }; -#define AXP288_TS_ADC_H 0x58 -#define AXP288_TS_ADC_L 0x59 -#define AXP288_GP_ADC_H 0x5a -#define AXP288_GP_ADC_L 0x5b - struct axp20x_dev { struct device *dev; int irq; + unsigned long irq_flags; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; long variant; @@ -582,7 +588,7 @@ int axp20x_match_device(struct axp20x_dev *axp20x); int axp20x_device_probe(struct axp20x_dev *axp20x); /** - * axp20x_device_probe(): Remove a axp20x device + * axp20x_device_remove(): Remove an axp20x device * * @axp20x: axp20x device to remove * diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index f62043a75f43..7a01c94496f1 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -103,6 +103,7 @@ struct cros_ec_command { * @din_size: size of din buffer to allocate (zero to use static din) * @dout_size: size of dout buffer to allocate (zero to use static dout) * @wake_enabled: true if this device can wake the system from sleep + * @suspended: true if this device has been suspended * @cmd_xfer: send command to EC and get response * Returns the number of bytes received if the communication succeeded, but * that doesn't mean the EC was happy with the command. The caller @@ -136,6 +137,7 @@ struct cros_ec_device { int din_size; int dout_size; bool wake_enabled; + bool suspended; int (*cmd_xfer)(struct cros_ec_device *ec, struct cros_ec_command *msg); int (*pkt_xfer)(struct cros_ec_device *ec, diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 098c3501ad2c..f1ef6388c233 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -1840,18 +1840,69 @@ struct ec_response_tmp006_get_raw { * * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for * expected response size. + * + * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish + * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type + * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX.
*/ #define EC_CMD_MKBP_STATE 0x60 -/* Provide information about the matrix : number of rows and columns */ +/* + * Provide information about various MKBP things. See enum ec_mkbp_info_type. + */ #define EC_CMD_MKBP_INFO 0x61 struct ec_response_mkbp_info { uint32_t rows; uint32_t cols; - uint8_t switches; + /* Formerly "switches", which was 0. */ + uint8_t reserved; } __packed; +struct ec_params_mkbp_info { + uint8_t info_type; + uint8_t event_type; +} __packed; + +enum ec_mkbp_info_type { + /* + * Info about the keyboard matrix: number of rows and columns. + * + * Returns struct ec_response_mkbp_info. + */ + EC_MKBP_INFO_KBD = 0, + + /* + * For buttons and switches, info about which specifically are + * supported. event_type must be set to one of the values in enum + * ec_mkbp_event. + * + * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte + * bitmask indicating which buttons or switches are present. See the + * bit indices below. + */ + EC_MKBP_INFO_SUPPORTED = 1, + + /* + * Instantaneous state of buttons and switches. + * + * event_type must be set to one of the values in enum ec_mkbp_event. + * + * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13] + * indicating the current state of the keyboard matrix. + * + * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw + * event state. + * + * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the + * state of supported buttons. + * + * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the + * state of supported switches. + */ + EC_MKBP_INFO_CURRENT = 2, +}; + /* Simulate key press */ #define EC_CMD_MKBP_SIMULATE_KEY 0x62 @@ -1984,6 +2035,12 @@ enum ec_mkbp_event { /* New Sensor FIFO data. The event data is fifo_info structure. */ EC_MKBP_EVENT_SENSOR_FIFO = 2, + /* The state of the non-matrixed buttons has changed. */ + EC_MKBP_EVENT_BUTTON = 3, + + /* The state of the switches has changed. */ + EC_MKBP_EVENT_SWITCH = 4, + /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; @@ -1993,6 +2050,9 @@ union ec_response_get_next_data { /* Unaligned */ uint32_t host_event; + + uint32_t buttons; + uint32_t switches; } __packed; struct ec_response_get_next_event { @@ -2001,6 +2061,16 @@ struct ec_response_get_next_event { union ec_response_get_next_data data; } __packed; +/* Bit indices for buttons and switches. */ +/* Buttons */ +#define EC_MKBP_POWER_BUTTON 0 +#define EC_MKBP_VOL_UP 1 +#define EC_MKBP_VOL_DOWN 2 + +/* Switches */ +#define EC_MKBP_LID_OPEN 0 +#define EC_MKBP_TABLET_MODE 1 + /*****************************************************************************/ /* Temperature sensor commands */ @@ -2478,6 +2548,20 @@ struct ec_params_ext_power_current_limit { uint32_t limit; /* in mA */ } __packed; +/* Inform the EC when entering a sleep state */ +#define EC_CMD_HOST_SLEEP_EVENT 0xa9 + +enum host_sleep_event { + HOST_SLEEP_EVENT_S3_SUSPEND = 1, + HOST_SLEEP_EVENT_S3_RESUME = 2, + HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, + HOST_SLEEP_EVENT_S0IX_RESUME = 4 +}; + +struct ec_params_host_sleep_event { + uint8_t sleep_event; +} __packed; + /*****************************************************************************/ /* Smart battery pass-through */ diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h new file mode 100644 index 000000000000..b4031c2b2214 --- /dev/null +++ b/include/linux/mfd/motorola-cpcap.h @@ -0,0 +1,292 @@ +/* + * The register defines are based on earlier cpcap.h in Motorola Linux kernel + * tree.
+ * + * Copyright (C) 2007-2009 Motorola, Inc. + * + * Rewritten for the real register offsets instead of enumeration + * to make the defines usable with Linux kernel regmap support + * + * Copyright (C) 2016 Tony Lindgren <tony@atomide.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define CPCAP_VENDOR_ST 0 +#define CPCAP_VENDOR_TI 1 + +#define CPCAP_REVISION_MAJOR(r) (((r) >> 4) + 1) +#define CPCAP_REVISION_MINOR(r) ((r) & 0xf) + +#define CPCAP_REVISION_1_0 0x08 +#define CPCAP_REVISION_1_1 0x09 +#define CPCAP_REVISION_2_0 0x10 +#define CPCAP_REVISION_2_1 0x11 + +/* CPCAP registers */ +#define CPCAP_REG_INT1 0x0000 /* Interrupt 1 */ +#define CPCAP_REG_INT2 0x0004 /* Interrupt 2 */ +#define CPCAP_REG_INT3 0x0008 /* Interrupt 3 */ +#define CPCAP_REG_INT4 0x000c /* Interrupt 4 */ +#define CPCAP_REG_INTM1 0x0010 /* Interrupt Mask 1 */ +#define CPCAP_REG_INTM2 0x0014 /* Interrupt Mask 2 */ +#define CPCAP_REG_INTM3 0x0018 /* Interrupt Mask 3 */ +#define CPCAP_REG_INTM4 0x001c /* Interrupt Mask 4 */ +#define CPCAP_REG_INTS1 0x0020 /* Interrupt Sense 1 */ +#define CPCAP_REG_INTS2 0x0024 /* Interrupt Sense 2 */ +#define CPCAP_REG_INTS3 0x0028 /* Interrupt Sense 3 */ +#define CPCAP_REG_INTS4 0x002c /* Interrupt Sense 4 */ +#define CPCAP_REG_ASSIGN1 0x0030 /* Resource Assignment 1 */ +#define CPCAP_REG_ASSIGN2 0x0034 /* Resource Assignment 2 */ +#define CPCAP_REG_ASSIGN3 0x0038 /* Resource Assignment 3 */ +#define CPCAP_REG_ASSIGN4 0x003c /* Resource Assignment 4 */ +#define CPCAP_REG_ASSIGN5 0x0040 /* Resource Assignment 5 */ +#define CPCAP_REG_ASSIGN6 0x0044 /* Resource Assignment 6 */ +#define CPCAP_REG_VERSC1 0x0048 /* Version Control 1 */ +#define CPCAP_REG_VERSC2 0x004c /* Version Control 2 */ + +#define CPCAP_REG_MI1 0x0200 /* Macro Interrupt 1 */ +#define CPCAP_REG_MIM1 0x0204 /* Macro Interrupt Mask 1 */ +#define CPCAP_REG_MI2 0x0208 /* Macro Interrupt 2 */ +#define CPCAP_REG_MIM2 0x020c /* Macro Interrupt Mask 2 */ +#define CPCAP_REG_UCC1 0x0210 /* UC Control 1 */ +#define CPCAP_REG_UCC2 0x0214 /* UC Control 2 */ + +#define CPCAP_REG_PC1 0x021c /* Power Cut 1 */ +#define CPCAP_REG_PC2 0x0220 /* Power Cut 2 */ +#define CPCAP_REG_BPEOL 0x0224 /* BP and EOL */ +#define CPCAP_REG_PGC 0x0228 /* Power Gate and Control */ +#define CPCAP_REG_MT1 0x022c /* Memory Transfer 1 */ +#define CPCAP_REG_MT2 0x0230 /* Memory Transfer 2 */ +#define CPCAP_REG_MT3 0x0234 /* Memory Transfer 3 */ +#define CPCAP_REG_PF 0x0238 /* Print Format */ + +#define CPCAP_REG_SCC 0x0400 /* System Clock Control */ +#define CPCAP_REG_SW1 0x0404 /* Stop Watch 1 */ +#define CPCAP_REG_SW2 0x0408 /* Stop Watch 2 */ +#define CPCAP_REG_UCTM 0x040c /* UC Turbo Mode */ +#define CPCAP_REG_TOD1 0x0410 /* Time of Day 1 */ +#define CPCAP_REG_TOD2 0x0414 /* Time of Day 2 */ +#define CPCAP_REG_TODA1 0x0418 /* Time of Day Alarm 1 */ +#define CPCAP_REG_TODA2 0x041c /* Time of Day Alarm 2 */ +#define CPCAP_REG_DAY 0x0420 /* Day */ +#define CPCAP_REG_DAYA 0x0424 /* Day Alarm */ +#define CPCAP_REG_VAL1 0x0428 /* Validity 1 */ +#define CPCAP_REG_VAL2 0x042c /* Validity 2 */ + +#define CPCAP_REG_SDVSPLL 0x0600 /* Switcher DVS and PLL */ +#define CPCAP_REG_SI2CC1 0x0604 /* Switcher I2C Control 1 */ +#define CPCAP_REG_Si2CC2 0x0608 /* Switcher I2C Control 2 */ +#define CPCAP_REG_S1C1 0x060c /* Switcher 1 Control 1 */ +#define CPCAP_REG_S1C2 0x0610 /* Switcher 1 Control 2 */ +#define 
CPCAP_REG_S2C1 0x0614 /* Switcher 2 Control 1 */ +#define CPCAP_REG_S2C2 0x0618 /* Switcher 2 Control 2 */ +#define CPCAP_REG_S3C 0x061c /* Switcher 3 Control */ +#define CPCAP_REG_S4C1 0x0620 /* Switcher 4 Control 1 */ +#define CPCAP_REG_S4C2 0x0624 /* Switcher 4 Control 2 */ +#define CPCAP_REG_S5C 0x0628 /* Switcher 5 Control */ +#define CPCAP_REG_S6C 0x062c /* Switcher 6 Control */ +#define CPCAP_REG_VCAMC 0x0630 /* VCAM Control */ +#define CPCAP_REG_VCSIC 0x0634 /* VCSI Control */ +#define CPCAP_REG_VDACC 0x0638 /* VDAC Control */ +#define CPCAP_REG_VDIGC 0x063c /* VDIG Control */ +#define CPCAP_REG_VFUSEC 0x0640 /* VFUSE Control */ +#define CPCAP_REG_VHVIOC 0x0644 /* VHVIO Control */ +#define CPCAP_REG_VSDIOC 0x0648 /* VSDIO Control */ +#define CPCAP_REG_VPLLC 0x064c /* VPLL Control */ +#define CPCAP_REG_VRF1C 0x0650 /* VRF1 Control */ +#define CPCAP_REG_VRF2C 0x0654 /* VRF2 Control */ +#define CPCAP_REG_VRFREFC 0x0658 /* VRFREF Control */ +#define CPCAP_REG_VWLAN1C 0x065c /* VWLAN1 Control */ +#define CPCAP_REG_VWLAN2C 0x0660 /* VWLAN2 Control */ +#define CPCAP_REG_VSIMC 0x0664 /* VSIM Control */ +#define CPCAP_REG_VVIBC 0x0668 /* VVIB Control */ +#define CPCAP_REG_VUSBC 0x066c /* VUSB Control */ +#define CPCAP_REG_VUSBINT1C 0x0670 /* VUSBINT1 Control */ +#define CPCAP_REG_VUSBINT2C 0x0674 /* VUSBINT2 Control */ +#define CPCAP_REG_URT 0x0678 /* Useroff Regulator Trigger */ +#define CPCAP_REG_URM1 0x067c /* Useroff Regulator Mask 1 */ +#define CPCAP_REG_URM2 0x0680 /* Useroff Regulator Mask 2 */ + +#define CPCAP_REG_VAUDIOC 0x0800 /* VAUDIO Control */ +#define CPCAP_REG_CC 0x0804 /* Codec Control */ +#define CPCAP_REG_CDI 0x0808 /* Codec Digital Interface */ +#define CPCAP_REG_SDAC 0x080c /* Stereo DAC */ +#define CPCAP_REG_SDACDI 0x0810 /* Stereo DAC Digital Interface */ +#define CPCAP_REG_TXI 0x0814 /* TX Inputs */ +#define CPCAP_REG_TXMP 0x0818 /* TX MIC PGA's */ +#define CPCAP_REG_RXOA 0x081c /* RX Output Amplifiers */ +#define CPCAP_REG_RXVC 0x0820 /* RX Volume Control */ +#define CPCAP_REG_RXCOA 0x0824 /* RX Codec to Output Amps */ +#define CPCAP_REG_RXSDOA 0x0828 /* RX Stereo DAC to Output Amps */ +#define CPCAP_REG_RXEPOA 0x082c /* RX External PGA to Output Amps */ +#define CPCAP_REG_RXLL 0x0830 /* RX Low Latency */ +#define CPCAP_REG_A2LA 0x0834 /* A2 Loudspeaker Amplifier */ +#define CPCAP_REG_MIPIS1 0x0838 /* MIPI Slimbus 1 */ +#define CPCAP_REG_MIPIS2 0x083c /* MIPI Slimbus 2 */ +#define CPCAP_REG_MIPIS3 0x0840 /* MIPI Slimbus 3. */ +#define CPCAP_REG_LVAB 0x0844 /* LMR Volume and A4 Balanced. 
*/ + +#define CPCAP_REG_CCC1 0x0a00 /* Coulomb Counter Control 1 */ +#define CPCAP_REG_CRM 0x0a04 /* Charger and Reverse Mode */ +#define CPCAP_REG_CCCC2 0x0a08 /* Coincell and Coulomb Ctr Ctrl 2 */ +#define CPCAP_REG_CCS1 0x0a0c /* Coulomb Counter Sample 1 */ +#define CPCAP_REG_CCS2 0x0a10 /* Coulomb Counter Sample 2 */ +#define CPCAP_REG_CCA1 0x0a14 /* Coulomb Counter Accumulator 1 */ +#define CPCAP_REG_CCA2 0x0a18 /* Coulomb Counter Accumulator 2 */ +#define CPCAP_REG_CCM 0x0a1c /* Coulomb Counter Mode */ +#define CPCAP_REG_CCO 0x0a20 /* Coulomb Counter Offset */ +#define CPCAP_REG_CCI 0x0a24 /* Coulomb Counter Integrator */ + +#define CPCAP_REG_ADCC1 0x0c00 /* A/D Converter Configuration 1 */ +#define CPCAP_REG_ADCC2 0x0c04 /* A/D Converter Configuration 2 */ +#define CPCAP_REG_ADCD0 0x0c08 /* A/D Converter Data 0 */ +#define CPCAP_REG_ADCD1 0x0c0c /* A/D Converter Data 1 */ +#define CPCAP_REG_ADCD2 0x0c10 /* A/D Converter Data 2 */ +#define CPCAP_REG_ADCD3 0x0c14 /* A/D Converter Data 3 */ +#define CPCAP_REG_ADCD4 0x0c18 /* A/D Converter Data 4 */ +#define CPCAP_REG_ADCD5 0x0c1c /* A/D Converter Data 5 */ +#define CPCAP_REG_ADCD6 0x0c20 /* A/D Converter Data 6 */ +#define CPCAP_REG_ADCD7 0x0c24 /* A/D Converter Data 7 */ +#define CPCAP_REG_ADCAL1 0x0c28 /* A/D Converter Calibration 1 */ +#define CPCAP_REG_ADCAL2 0x0c2c /* A/D Converter Calibration 2 */ + +#define CPCAP_REG_USBC1 0x0e00 /* USB Control 1 */ +#define CPCAP_REG_USBC2 0x0e04 /* USB Control 2 */ +#define CPCAP_REG_USBC3 0x0e08 /* USB Control 3 */ +#define CPCAP_REG_UVIDL 0x0e0c /* ULPI Vendor ID Low */ +#define CPCAP_REG_UVIDH 0x0e10 /* ULPI Vendor ID High */ +#define CPCAP_REG_UPIDL 0x0e14 /* ULPI Product ID Low */ +#define CPCAP_REG_UPIDH 0x0e18 /* ULPI Product ID High */ +#define CPCAP_REG_UFC1 0x0e1c /* ULPI Function Control 1 */ +#define CPCAP_REG_UFC2 0x0e20 /* ULPI Function Control 2 */ +#define CPCAP_REG_UFC3 0x0e24 /* ULPI Function Control 3 */ +#define CPCAP_REG_UIC1 0x0e28 /* ULPI Interface Control 1 */ +#define CPCAP_REG_UIC2 0x0e2c /* ULPI Interface Control 2 */ +#define CPCAP_REG_UIC3 0x0e30 /* ULPI Interface Control 3 */ +#define CPCAP_REG_USBOTG1 0x0e34 /* USB OTG Control 1 */ +#define CPCAP_REG_USBOTG2 0x0e38 /* USB OTG Control 2 */ +#define CPCAP_REG_USBOTG3 0x0e3c /* USB OTG Control 3 */ +#define CPCAP_REG_UIER1 0x0e40 /* USB Interrupt Enable Rising 1 */ +#define CPCAP_REG_UIER2 0x0e44 /* USB Interrupt Enable Rising 2 */ +#define CPCAP_REG_UIER3 0x0e48 /* USB Interrupt Enable Rising 3 */ +#define CPCAP_REG_UIEF1 0x0e4c /* USB Interrupt Enable Falling 1 */ +#define CPCAP_REG_UIEF2 0x0e50 /* USB Interrupt Enable Falling 2 */ +#define CPCAP_REG_UIEF3 0x0e54 /* USB Interrupt Enable Falling 3 */ +#define CPCAP_REG_UIS 0x0e58 /* USB Interrupt Status */ +#define CPCAP_REG_UIL 0x0e5c /* USB Interrupt Latch */ +#define CPCAP_REG_USBD 0x0e60 /* USB Debug */ +#define CPCAP_REG_SCR1 0x0e64 /* Scratch 1 */ +#define CPCAP_REG_SCR2 0x0e68 /* Scratch 2 */ +#define CPCAP_REG_SCR3 0x0e6c /* Scratch 3 */ + +#define CPCAP_REG_VMC 0x0eac /* Video Mux Control */ +#define CPCAP_REG_OWDC 0x0eb0 /* One Wire Device Control */ +#define CPCAP_REG_GPIO0 0x0eb4 /* GPIO 0 Control */ + +#define CPCAP_REG_GPIO1 0x0ebc /* GPIO 1 Control */ + +#define CPCAP_REG_GPIO2 0x0ec4 /* GPIO 2 Control */ + +#define CPCAP_REG_GPIO3 0x0ecc /* GPIO 3 Control */ + +#define CPCAP_REG_GPIO4 0x0ed4 /* GPIO 4 Control */ + +#define CPCAP_REG_GPIO5 0x0edc /* GPIO 5 Control */ + +#define CPCAP_REG_GPIO6 0x0ee4 /* GPIO 6 Control */ + +#define CPCAP_REG_MDLC
0x1000 /* Main Display Lighting Control */ +#define CPCAP_REG_KLC 0x1004 /* Keypad Lighting Control */ +#define CPCAP_REG_ADLC 0x1008 /* Aux Display Lighting Control */ +#define CPCAP_REG_REDC 0x100c /* Red Triode Control */ +#define CPCAP_REG_GREENC 0x1010 /* Green Triode Control */ +#define CPCAP_REG_BLUEC 0x1014 /* Blue Triode Control */ +#define CPCAP_REG_CFC 0x1018 /* Camera Flash Control */ +#define CPCAP_REG_ABC 0x101c /* Adaptive Boost Control */ +#define CPCAP_REG_BLEDC 0x1020 /* Bluetooth LED Control */ +#define CPCAP_REG_CLEDC 0x1024 /* Camera Privacy LED Control */ + +#define CPCAP_REG_OW1C 0x1200 /* One Wire 1 Command */ +#define CPCAP_REG_OW1D 0x1204 /* One Wire 1 Data */ +#define CPCAP_REG_OW1I 0x1208 /* One Wire 1 Interrupt */ +#define CPCAP_REG_OW1IE 0x120c /* One Wire 1 Interrupt Enable */ + +#define CPCAP_REG_OW1 0x1214 /* One Wire 1 Control */ + +#define CPCAP_REG_OW2C 0x1220 /* One Wire 2 Command */ +#define CPCAP_REG_OW2D 0x1224 /* One Wire 2 Data */ +#define CPCAP_REG_OW2I 0x1228 /* One Wire 2 Interrupt */ +#define CPCAP_REG_OW2IE 0x122c /* One Wire 2 Interrupt Enable */ + +#define CPCAP_REG_OW2 0x1234 /* One Wire 2 Control */ + +#define CPCAP_REG_OW3C 0x1240 /* One Wire 3 Command */ +#define CPCAP_REG_OW3D 0x1244 /* One Wire 3 Data */ +#define CPCAP_REG_OW3I 0x1248 /* One Wire 3 Interrupt */ +#define CPCAP_REG_OW3IE 0x124c /* One Wire 3 Interrupt Enable */ + +#define CPCAP_REG_OW3 0x1254 /* One Wire 3 Control */ +#define CPCAP_REG_GCAIC 0x1258 /* GCAI Clock Control */ +#define CPCAP_REG_GCAIM 0x125c /* GCAI GPIO Mode */ +#define CPCAP_REG_LGDIR 0x1260 /* LMR GCAI GPIO Direction */ +#define CPCAP_REG_LGPU 0x1264 /* LMR GCAI GPIO Pull-up */ +#define CPCAP_REG_LGPIN 0x1268 /* LMR GCAI GPIO Pin */ +#define CPCAP_REG_LGMASK 0x126c /* LMR GCAI GPIO Mask */ +#define CPCAP_REG_LDEB 0x1270 /* LMR Debounce Settings */ +#define CPCAP_REG_LGDET 0x1274 /* LMR GCAI Detach Detect */ +#define CPCAP_REG_LMISC 0x1278 /* LMR Misc Bits */ +#define CPCAP_REG_LMACE 0x127c /* LMR Mace IC Support */ + +#define CPCAP_REG_TEST 0x7c00 /* Test */ + +#define CPCAP_REG_ST_TEST1 0x7d08 /* ST Test1 */ + +#define CPCAP_REG_ST_TEST2 0x7d18 /* ST Test2 */ + +/* + * Helpers for child devices to check the revision and vendor. + * + * REVISIT: No documentation for the bits below, please update + * to use proper names for defines when available. 
+ */ + +static inline int cpcap_get_revision(struct device *dev, + struct regmap *regmap, + u16 *revision) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read revision\n"); + + return ret; + } + + *revision = ((val >> 3) & 0x7) | ((val << 3) & 0x38); + + return 0; +} + +static inline int cpcap_get_vendor(struct device *dev, + struct regmap *regmap, + u16 *vendor) +{ + unsigned int val; + int ret; + + ret = regmap_read(regmap, CPCAP_REG_VERSC1, &val); + if (ret) { + dev_err(dev, "Could not read vendor\n"); + + return ret; + } + + *vendor = (val >> 6) & 0x7; + + return 0; +} diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 6483a6fdce59..ffb21e79204d 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -134,6 +134,7 @@ /* RTC_CTRL_REG bitfields */ #define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */ +#define TPS65910_RTC_CTRL_AUTO_COMP 0x04 #define TPS65910_RTC_CTRL_GET_TIME 0x40 /* RTC_STATUS_REG bitfields */ diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 27d7c95fd0da..504d54c71bdb 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -90,7 +90,7 @@ struct mbus_hw_ops { }; struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index ae8d475a9385..fa76b516fa47 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); -extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); extern int migrate_prep(void); @@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) + { return -EBUSY; } static inline int migrate_prep(void) { return -ENOSYS; } static inline int migrate_prep_local(void) { return -ENOSYS; } diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 0590263c462c..762b5fec3383 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -32,6 +32,7 @@ #define SGI_MMTIMER 153 #define STORE_QUEUE_MINOR 155 /* unused */ #define I2O_MINOR 166 +#define HWRNG_MINOR 183 #define MICROCODE_MINOR 184 #define IRNET_MINOR 187 #define VFIO_MINOR 196 diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 1f3568694a57..7b74afcbbab2 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -308,7 +308,7 @@ int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index, int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx, struct ifla_vf_stats *vf_stats); u32 mlx4_comm_get_version(void); -int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac); int 
mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos, __be16 proto); int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index bd0e7075ea6d..e965e5090d96 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -104,4 +104,14 @@ static inline u64 mlx4_mac_to_u64(u8 *addr) return mac; } +static inline void mlx4_u64_to_mac(u8 *addr, u64 mac) +{ + int i; + + for (i = ETH_ALEN; i > 0; i--) { + addr[i - 1] = mac & 0xFF; + mac >>= 8; + } +} + #endif /* MLX4_DRIVER_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1bc4641734da..2fcff6b4503f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -295,6 +295,7 @@ struct mlx5_port_caps { int gid_table_len; int pkey_table_len; u8 ext_port_cap; + bool has_smi; }; struct mlx5_cmd_mailbox { @@ -1061,7 +1062,10 @@ enum { }; enum { - MAX_MR_CACHE_ENTRIES = 21, + MAX_UMR_CACHE_ENTRY = 20, + MLX5_IMR_MTT_CACHE_ENTRY, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MR_CACHE_ENTRIES }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index afcd4736d8df..838242697541 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -5013,7 +5013,7 @@ struct mlx5_ifc_modify_rq_out_bits { enum { MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1, - MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3, }; struct mlx5_ifc_modify_rq_in_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 6ff66d6fe8e2..0d65dd72c0f4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -285,6 +285,17 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ +#define FAULT_FLAG_TRACE \ + { FAULT_FLAG_WRITE, "WRITE" }, \ + { FAULT_FLAG_MKWRITE, "MKWRITE" }, \ + { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \ + { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \ + { FAULT_FLAG_KILLABLE, "KILLABLE" }, \ + { FAULT_FLAG_TRIED, "TRIED" }, \ + { FAULT_FLAG_USER, "USER" }, \ + { FAULT_FLAG_REMOTE, "REMOTE" }, \ + { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" } + /* * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function.
The vma's ->fault is responsible for returning a bitmask @@ -303,6 +314,9 @@ struct vm_fault { unsigned long address; /* Faulting virtual address */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ + pud_t *pud; /* Pointer to pud entry matching + * the 'address' + */ pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Page handler may use for COW fault */ @@ -330,6 +344,13 @@ struct vm_fault { */ }; +/* page entry size for vm->huge_fault() */ +enum page_entry_size { + PE_SIZE_PTE = 0, + PE_SIZE_PMD, + PE_SIZE_PUD, +}; + /* * These are the virtual MM functions - opening of an area, closing and * unmapping it (needed to keep files on disk up-to-date etc), pointer @@ -339,18 +360,17 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*mremap)(struct vm_area_struct * area); - int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - int (*pmd_fault)(struct vm_area_struct *, unsigned long address, - pmd_t *, unsigned int flags); + int (*fault)(struct vm_fault *vmf); + int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*page_mkwrite)(struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); + int (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -406,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd) { return 0; } +static inline int pud_devmap(pud_t pud) +{ + return 0; +} #endif /* @@ -1111,6 +1135,20 @@ static inline void clear_page_pfmemalloc(struct page *page) VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ VM_FAULT_FALLBACK) +#define VM_FAULT_RESULT_TRACE \ + { VM_FAULT_OOM, "OOM" }, \ + { VM_FAULT_SIGBUS, "SIGBUS" }, \ + { VM_FAULT_MAJOR, "MAJOR" }, \ + { VM_FAULT_WRITE, "WRITE" }, \ + { VM_FAULT_HWPOISON, "HWPOISON" }, \ + { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ + { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ + { VM_FAULT_NOPAGE, "NOPAGE" }, \ + { VM_FAULT_LOCKED, "LOCKED" }, \ + { VM_FAULT_RETRY, "RETRY" }, \ + { VM_FAULT_FALLBACK, "FALLBACK" }, \ + { VM_FAULT_DONE_COW, "DONE_COW" } + /* Encode hstate index for a hwpoisoned large page */ #define VM_FAULT_SET_HINDEX(x) ((x) << 12) #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) @@ -1128,18 +1166,7 @@ extern void pagefault_out_of_memory(void); */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ -extern void show_free_areas(unsigned int flags); -extern bool skip_free_areas_node(unsigned int flags, int nid); - -int shmem_zero_setup(struct vm_area_struct *); -#ifdef CONFIG_SHMEM -bool shmem_mapping(struct address_space *mapping); -#else -static inline bool shmem_mapping(struct address_space *mapping) -{ - return false; -} -#endif +extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); extern bool can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); @@ -1152,8 +1179,6 @@ struct zap_details { struct address_space *check_mapping; /* Check page->mapping if set */ pgoff_t first_index; /* Lowest page->index to unmap */ pgoff_t last_index; 
/* Highest page->index to unmap */ - bool ignore_dirty; /* Ignore dirty pages */ - bool check_swap_entries; /* Check also swap entries */ }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, @@ -1164,12 +1189,16 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); void zap_page_range(struct vm_area_struct *vma, unsigned long address, - unsigned long size, struct zap_details *); + unsigned long size); void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); /** * mm_walk - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry * this handler is required to be able to handle * pmd_trans_huge() pmds. They may simply choose to @@ -1189,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, * (see the comment on walk_page_range() for more details) */ struct mm_walk { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); int (*pmd_entry)(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk); int (*pte_entry)(pte_t *pte, unsigned long addr, @@ -1359,6 +1390,16 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) return !vma->vm_ops; } +#ifdef CONFIG_SHMEM +/* + * The vma_is_shmem is not inline because it is used only by slow + * paths in userfault. + */ +bool vma_is_shmem(struct vm_area_struct *vma); +#else +static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } +#endif + static inline int stack_guard_page_start(struct vm_area_struct *vma, unsigned long addr) { @@ -1762,8 +1803,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) return ptl; } -extern void __init pagecache_init(void); +/* + * No scalability reason to split PUD locks yet, but follow the same pattern + * as the PMD locks to make it easier if we decide to. The VM should not be + * considered ready to switch to split PUD locks yet; there may be places + * which need to be converted from page_table_lock. 
+ */ +static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) +{ + return &mm->page_table_lock; +} +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +{ + spinlock_t *ptl = pud_lockptr(mm, pud); + + spin_lock(ptl); + return ptl; +} + +extern void __init pagecache_init(void); extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); @@ -1900,7 +1959,7 @@ extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); -extern void show_mem(unsigned int flags); +extern void show_mem(unsigned int flags, nodemask_t *nodemask); extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); @@ -1908,8 +1967,8 @@ extern void si_meminfo_node(struct sysinfo *val, int nid); extern unsigned long arch_reserved_kernel_pages(void); #endif -extern __printf(2, 3) -void warn_alloc(gfp_t gfp_mask, const char *fmt, ...); +extern __printf(3, 4) +void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); extern void setup_per_cpu_pageset(void); @@ -1972,8 +2031,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int split_vma(struct mm_struct *, - struct vm_area_struct *, unsigned long addr, int new_below); +extern int __split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); @@ -2021,18 +2082,22 @@ extern int install_special_mapping(struct mm_struct *mm, extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); -extern int do_munmap(struct mm_struct *, unsigned long, size_t); + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, + struct list_head *uf); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf); static inline unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate) + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) { - return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); } #ifdef CONFIG_MMU @@ -2049,6 +2114,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {} /* These take the mm semaphore themselves */ extern int __must_check vm_brk(unsigned long, unsigned long); 
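/*
 * Editorial aside, not part of the original header: the pud_lock() helper
 * introduced above is taken and released exactly like pmd_lock(). Assuming
 * 'mm' and 'pud' come from a page-table walk, a caller would do:
 *
 *	spinlock_t *ptl = pud_lock(mm, pud);
 *	...inspect or update *pud while the lock is held...
 *	spin_unlock(ptl);
 */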
+extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, @@ -2092,10 +2158,10 @@ extern void truncate_inode_pages_range(struct address_space *, extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ -extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern int filemap_fault(struct vm_fault *vmf); extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); -extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); +extern int filemap_page_mkwrite(struct vm_fault *vmf); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); @@ -2400,6 +2466,10 @@ extern void clear_huge_page(struct page *page, extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page); +extern long copy_huge_page_from_user(struct page *dst_page, + const void __user *usr_src, + unsigned int pages_per_huge_page, + bool allow_pagefault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ extern struct page_ext_operations debug_guardpage_ops; diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 41d376e7116d..e030a68ead7e 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page, list_add(&page->lru, &lruvec->lists[lru]); } +static __always_inline void add_page_to_lru_list_tail(struct page *page, + struct lruvec *lruvec, enum lru_list lru) +{ + update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + list_add_tail(&page->lru, &lruvec->lists[lru]); +} + static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 808751d7b737..f60f45fe226f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1,9 +1,9 @@ #ifndef _LINUX_MM_TYPES_H #define _LINUX_MM_TYPES_H +#include <linux/mm_types_task.h> + #include <linux/auxvec.h> -#include <linux/types.h> -#include <linux/threads.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rbtree.h> @@ -13,7 +13,7 @@ #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> -#include <asm/page.h> + #include <asm/mmu.h> #ifndef AT_VECTOR_SIZE_ARCH @@ -24,11 +24,6 @@ struct address_space; struct mem_cgroup; -#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) -#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -231,17 +226,6 @@ struct page { #endif ; -struct page_frag { - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 offset; - __u32 size; -#else - __u16 offset; - __u16 size; -#endif -}; - #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) @@ -371,27 +355,6 @@ struct core_state { struct completion startup; }; -enum { - MM_FILEPAGES, /* Resident file 
mapping pages */ - MM_ANONPAGES, /* Resident anonymous pages */ - MM_SWAPENTS, /* Anonymous swap entries */ - MM_SHMEMPAGES, /* Resident shared memory pages */ - NR_MM_COUNTERS -}; - -#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) -#define SPLIT_RSS_COUNTING -/* per-thread cached information, */ -struct task_rss_stat { - int events; /* for synchronization threshold */ - int count[NR_MM_COUNTERS]; -}; -#endif /* USE_SPLIT_PTE_PTLOCKS */ - -struct mm_rss_stat { - atomic_long_t count[NR_MM_COUNTERS]; -}; - struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ @@ -407,8 +370,27 @@ struct mm_struct { unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; - atomic_t mm_users; /* How many users with user space? */ - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + + /** + * @mm_users: The number of users including userspace. + * + * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops + * to 0 (i.e. when the task exits and there are no other temporary + * reference holders), we also release a reference on @mm_count + * (which may then free the &struct mm_struct if @mm_count also + * drops to 0). + */ + atomic_t mm_users; + + /** + * @mm_count: The number of references to &struct mm_struct + * (@mm_users count as 1). + * + * Use mmgrab()/mmdrop() to modify. When this drops to 0, the + * &struct mm_struct is freed. + */ + atomic_t mm_count; + atomic_long_t nr_ptes; /* PTE page table pages */ #if CONFIG_PGTABLE_LEVELS > 2 atomic_long_t nr_pmds; /* PMD page table pages */ @@ -515,6 +497,8 @@ struct mm_struct { struct work_struct async_put_work; }; +extern struct mm_struct init_mm; + static inline void mm_init_cpumask(struct mm_struct *mm) { #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h new file mode 100644 index 000000000000..136dfdf63ba1 --- /dev/null +++ b/include/linux/mm_types_task.h @@ -0,0 +1,87 @@ +#ifndef _LINUX_MM_TYPES_TASK_H +#define _LINUX_MM_TYPES_TASK_H + +/* + * Here are the definitions of the MM data types that are embedded in 'struct task_struct'. + * + * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) 
+ */ + +#include <linux/types.h> +#include <linux/threads.h> +#include <linux/atomic.h> +#include <linux/cpumask.h> + +#include <asm/page.h> + +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) +#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) + +/* + * The per task VMA cache array: + */ +#define VMACACHE_BITS 2 +#define VMACACHE_SIZE (1U << VMACACHE_BITS) +#define VMACACHE_MASK (VMACACHE_SIZE - 1) + +struct vmacache { + u32 seqnum; + struct vm_area_struct *vmas[VMACACHE_SIZE]; +}; + +enum { + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ + NR_MM_COUNTERS +}; + +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) +#define SPLIT_RSS_COUNTING +/* per-thread cached information, */ +struct task_rss_stat { + int events; /* for synchronization threshold */ + int count[NR_MM_COUNTERS]; +}; +#endif /* USE_SPLIT_PTE_PTLOCKS */ + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct page_frag { + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 offset; + __u32 size; +#else + __u16 offset; + __u16 size; +#endif +}; + +/* Track pages that require TLB flushes */ +struct tlbflush_unmap_batch { +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* + * Each bit set is a CPU that potentially has a TLB entry for one of + * the PFNs being flushed. See set_tlb_ubc_flush_pending(). + */ + struct cpumask cpumask; + + /* True if any bit in cpumask is set */ + bool flush_required; + + /* + * If true then the PTE was dirty when unmapped. The entry must be + * flushed before IO is initiated or a stale TLB entry potentially + * allows an update without redirtying the page. 
+ */ + bool writable; +#endif +}; + +#endif /* _LINUX_MM_TYPES_TASK_H */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index a1a210d59961..51891fb0d3ce 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pmd; \ }) +#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pud_t ___pud; \ + \ + ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PUD_SIZE); \ + \ + ___pud; \ +}) + #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ ({ \ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ @@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define pmdp_clear_young_notify pmdp_test_and_clear_young #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush +#define pudp_huge_clear_flush_notify pudp_huge_clear_flush #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f4aac87adcc3..8e02b3750fe0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -236,8 +236,6 @@ struct lruvec { #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON)) #define LRU_ALL ((1 << NR_LRU_LISTS) - 1) -/* Isolate clean file */ -#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1) /* Isolate unmapped file */ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ @@ -779,7 +777,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) #endif } -extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru); +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); #ifdef CONFIG_HAVE_MEMORY_PRESENT void memory_present(int nid, unsigned long start, unsigned long end); diff --git a/include/linux/mount.h b/include/linux/mount.h index c6f55158d5e5..8e0352af06b7 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -90,6 +90,9 @@ struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); +extern struct vfsmount *vfs_submount(const struct dentry *mountpoint, + struct file_system_type *type, + const char *name, void *data); extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/msi.h b/include/linux/msi.h index a83b84ff70e5..df6d59201d31 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -325,12 +325,6 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); -int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, - int nvec, int type); -void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, - struct msi_domain_info *info, struct irq_domain *parent); - irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc); int pci_msi_domain_check_cap(struct 
irq_domain *domain, diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 7b3d487d8b3f..b532ce524dae 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -14,7 +14,7 @@ * @DevId - Chip Device ID * @qinfo - pointer to qinfo records describing the chip * @numchips - number of chips including virtual RWW partitions - * @chipshift - Chip/partiton size 2^chipshift + * @chipshift - Chip/partition size 2^chipshift * @chips - per-chip data structure */ struct lpddr_private { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f1da8c8dd473..287f34161086 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -335,7 +335,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); -extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); +extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct inode *, int); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 0a3fadc32693..aa3cd0878270 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,43 @@ #include <linux/sched.h> #include <asm/irq.h> +#ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); +extern void touch_softlockup_watchdog(void); +extern void touch_softlockup_watchdog_sync(void); +extern void touch_all_softlockup_watchdogs(void); +extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; +void lockup_detector_init(void); +#else +static inline void touch_softlockup_watchdog_sched(void) +{ +} +static inline void touch_softlockup_watchdog(void) +{ +} +static inline void touch_softlockup_watchdog_sync(void) +{ +} +static inline void touch_all_softlockup_watchdogs(void) +{ +} +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + /* * The run state of the lockup detectors is controlled by the content of the * 'watchdog_enabled' variable.
Each lockup detector has its dedicated bit - diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index bf240a3cbf99..a72fd04aa5e1 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h @@ -29,6 +29,30 @@ enum nvme_rdma_cm_status { NVME_RDMA_CM_INVALID_ORD = 0x08, }; +static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) +{ + switch (status) { + case NVME_RDMA_CM_INVALID_LEN: + return "invalid length"; + case NVME_RDMA_CM_INVALID_RECFMT: + return "invalid record format"; + case NVME_RDMA_CM_INVALID_QID: + return "invalid queue ID"; + case NVME_RDMA_CM_INVALID_HSQSIZE: + return "invalid host SQ size"; + case NVME_RDMA_CM_INVALID_HRQSIZE: + return "invalid host RQ size"; + case NVME_RDMA_CM_NO_RSC: + return "resource not found"; + case NVME_RDMA_CM_INVALID_IRD: + return "invalid IRD"; + case NVME_RDMA_CM_INVALID_ORD: + return "invalid ORD"; + default: + return "unrecognized reason"; + } +} + /** * struct nvme_rdma_cm_req - rdma connect request * diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 0b676a02cf3e..c43d435d4225 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -579,6 +579,12 @@ struct nvme_write_zeroes_cmd { __le16 appmask; }; +/* Features */ + +struct nvme_feat_auto_pst { + __le64 entries[32]; +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -644,7 +650,9 @@ struct nvme_identify { __le32 nsid; __u64 rsvd2[2]; union nvme_data_ptr dptr; - __le32 cns; + __u8 cns; + __u8 rsvd3; + __le16 ctrlid; __u32 rsvd11[5]; }; diff --git a/include/linux/of_device.h b/include/linux/of_device.h index e9afbcc8de12..c12dace043f3 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -13,7 +13,6 @@ struct device; #ifdef CONFIG_OF extern const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev); -extern void of_device_make_bus_id(struct device *dev); /** * of_driver_match_device - Tell if a driver's of_match_table matches a device.
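A short aside on the nvme_rdma_cm_msg() helper added above: it only maps enum nvme_rdma_cm_status codes to printable strings, so its natural use is in the error path of an RDMA connect attempt. A minimal sketch, with a hypothetical caller (the function and variable names below are illustrative, not from this diff):

	/* Hypothetical caller: log why an NVMe/RDMA connect was rejected. */
	static void report_cm_reject(struct device *dev, int status)
	{
		dev_err(dev, "connect rejected: status %d (%s)\n",
			status, nvme_rdma_cm_msg(status));
	}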
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index bb3a5a2cd570..abdb02eaef06 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -51,6 +51,8 @@ struct device_node *of_graph_get_endpoint_by_regs( struct device_node *of_graph_get_remote_port_parent( const struct device_node *node); struct device_node *of_graph_get_remote_port(const struct device_node *node); +struct device_node *of_graph_get_remote_node(const struct device_node *node, + u32 port, u32 endpoint); #else static inline int of_graph_parse_endpoint(const struct device_node *node, @@ -89,6 +91,12 @@ static inline struct device_node *of_graph_get_remote_port( { return NULL; } +static inline struct device_node *of_graph_get_remote_node( + const struct device_node *node, + u32 port, u32 endpoint) +{ + return NULL; +} #endif /* CONFIG_OF */ diff --git a/include/linux/oom.h b/include/linux/oom.h index b4e36e92bc87..8a266e2be5a6 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -2,7 +2,7 @@ #define __INCLUDE_LINUX_OOM_H -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 324c8dbad1e1..84943e8057ef 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -266,7 +266,6 @@ static inline struct page *find_get_page_flags(struct address_space *mapping, /** * find_lock_page - locate, pin and lock a pagecache page - * pagecache_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * @@ -482,19 +481,11 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, } /* - * This is exported only for wait_on_page_locked/wait_on_page_writeback, - * and for filesystems which need to wait on PG_private. + * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., + * and should not be used directly. */ extern void wait_on_page_bit(struct page *page, int bit_nr); extern int wait_on_page_bit_killable(struct page *page, int bit_nr); -extern void wake_up_page_bit(struct page *page, int bit_nr); - -static inline void wake_up_page(struct page *page, int bit) -{ - if (!PageWaiters(page)) - return; - wake_up_page_bit(page, bit); -} /* * Wait for a page to be unlocked. 
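The of_graph_get_remote_node() helper added to <linux/of_graph.h> above folds the usual get-endpoint/get-remote-port-parent sequence into a single call. A hedged sketch of typical use; the port/endpoint numbers and the lookup context are invented for illustration:

/* Find whatever device node is wired to port 0, endpoint 0 of @np.
 * The helper returns the remote node with an elevated refcount, so the
 * caller must release it with of_node_put() when done. */
static int example_find_remote(struct device_node *np)
{
	struct device_node *remote;

	remote = of_graph_get_remote_node(np, 0, 0);
	if (!remote)
		return -ENODEV;	/* endpoint missing or remote unavailable */

	/* ... match 'remote' against a registered panel or bridge ... */

	of_node_put(remote);
	return 0;
}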
diff --git a/include/linux/pci.h b/include/linux/pci.h index adbc859fe7c4..eb3da1a04e6c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -678,9 +678,6 @@ struct pci_error_handlers { /* MMIO has been re-enabled, but not DMA */ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); - /* PCI Express link has been reset */ - pci_ers_result_t (*link_reset)(struct pci_dev *dev); - /* PCI slot has been reset */ pci_ers_result_t (*slot_reset)(struct pci_dev *dev); @@ -1308,14 +1305,7 @@ void pci_msix_shutdown(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); int pci_msi_enabled(void); -int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); -static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) -{ - int rc = pci_enable_msi_range(dev, nvec, nvec); - if (rc < 0) - return rc; - return 0; -} +int pci_enable_msi(struct pci_dev *dev); int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec); static inline int pci_enable_msix_exact(struct pci_dev *dev, @@ -1333,6 +1323,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); +int pci_irq_get_node(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1346,10 +1337,7 @@ static inline void pci_msix_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msix(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } static inline int pci_msi_enabled(void) { return 0; } -static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, - int maxvec) -{ return -ENOSYS; } -static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) +static inline int pci_enable_msi(struct pci_dev *dev) { return -ENOSYS; } static inline int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) @@ -1383,6 +1371,11 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } + +static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) +{ + return first_online_node; +} #endif static inline int @@ -1425,8 +1418,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif -#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1) - #ifdef CONFIG_HT_IRQ /* The functions a driver should call */ int ht_create_irq(struct pci_dev *dev, int idx); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 73dda0edcb97..a4f77feecbb0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2516,6 +2516,8 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff +#define PCI_VENDOR_ID_HUAWEI 0x19e5 + #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 #define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index a5f98d53d732..9b7dd59fe28d 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H +#include <linux/sched/task_stack.h> + struct perf_regs { __u64 abi; struct pt_regs *regs; diff --git 
a/include/linux/pfn_t.h b/include/linux/pfn_t.h index a3d90b9da18d..a49b3259cad7 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -15,6 +15,12 @@ #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) +#define PFN_FLAGS_TRACE \ + { PFN_SG_CHAIN, "SG_CHAIN" }, \ + { PFN_SG_LAST, "SG_LAST" }, \ + { PFN_DEV, "DEV" }, \ + { PFN_MAP, "MAP" } + static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; @@ -84,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) { return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); } + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) +{ + return pfn_pud(pfn_t_to_pfn(pfn), pgprot); +} +#endif #endif #ifdef __HAVE_ARCH_PTE_DEVMAP @@ -100,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn) } pte_t pte_mkdevmap(pte_t pte); pmd_t pmd_mkdevmap(pmd_t pmd); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) +pud_t pud_mkdevmap(pud_t pud); #endif +#endif /* __HAVE_ARCH_PTE_DEVMAP */ + #endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/pid.h b/include/linux/pid.h index 23705a53abba..4d179316e431 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -1,7 +1,7 @@ #ifndef _LINUX_PID_H #define _LINUX_PID_H -#include <linux/rcupdate.h> +#include <linux/rculist.h> enum pid_type { @@ -191,10 +191,10 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_thread(pid, type, task) \ do_each_pid_task(pid, type, task) { \ struct task_struct *tg___ = task; \ - do { + for_each_thread(tg___, task) { #define while_each_pid_thread(pid, type, task) \ - } while_each_thread(tg___, task); \ + } \ task = tg___; \ } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 34cce96741bc..c2a989dee876 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -21,6 +21,12 @@ struct pidmap { struct fs_pin; +enum { /* definitions for pid_namespace's hide_pid field */ + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, +}; + struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index 15bf56ee8af7..90641a5daaf0 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -18,7 +18,7 @@ extern void s3c64xx_ac97_setup_gpio(int); -struct samsung_i2s { +struct samsung_i2s_type { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) /* If the I2S block has a Stereo Overlay Channel */ @@ -47,7 +47,5 @@ struct s3c_audio_pdata { void *dma_capture; void *dma_play_sec; void *dma_capture_mic; - union { - struct samsung_i2s i2s; - } type; + struct samsung_i2s_type type; }; diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index 6ace3fd32b6a..90ae19ca828f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -21,23 +21,28 @@ #include <asm-generic/gpio.h> +#define MAX_REGS_BANKS 5 + struct davinci_gpio_platform_data { u32 ngpio; u32 gpio_unbanked; }; +struct davinci_gpio_irq_data { + void __iomem *regs; + struct davinci_gpio_controller *chip; + int bank_num; +}; struct davinci_gpio_controller { struct gpio_chip chip; struct irq_domain *irq_domain; /* 
Serialize access to GPIO registers */ spinlock_t lock; - void __iomem *regs; - void __iomem *set_data; - void __iomem *clr_data; - void __iomem *in_data; + void __iomem *regs[MAX_REGS_BANKS]; int gpio_unbanked; - unsigned gpio_irq; + unsigned int base_irq; + unsigned int base; }; /* diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h deleted file mode 100644 index 915d6b4f0f89..000000000000 --- a/include/linux/platform_data/rtc-m48t86.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * ST M48T86 / Dallas DS12887 RTC driver - * Copyright (c) 2006 Tower Technologies - * - * Author: Alessandro Zummo <a.zummo@towertech.it> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -struct m48t86_ops -{ - void (*writebyte)(unsigned char value, unsigned long addr); - unsigned char (*readbyte)(unsigned long addr); -}; diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index 18e908324549..a5c0a71ec914 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -47,10 +47,6 @@ #define LSCR1_GRAY2(x) (((x) & 0xf) << 4) #define LSCR1_GRAY1(x) (((x) & 0xf)) -#define DMACR_BURST (1 << 31) -#define DMACR_HM(x) (((x) & 0xf) << 16) -#define DMACR_TM(x) ((x) & 0xf) - struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h new file mode 100644 index 000000000000..3ab892208343 --- /dev/null +++ b/include/linux/platform_data/x86/clk-pmc-atom.h @@ -0,0 +1,44 @@ +/* + * Intel Atom platform clocks for BayTrail and CherryTrail SoC. + * + * Copyright (C) 2016, Intel Corporation + * Author: Irina Tirdea <irina.tirdea@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H +#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H + +/** + * struct pmc_clk - PMC platform clock configuration + * + * @name: identifier, typically pmc_plt_clk_<x>, x=[0..5] + * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported + * @parent_name: one of 'xtal' or 'osc' + */ +struct pmc_clk { + const char *name; + unsigned long freq; + const char *parent_name; +}; + +/** + * struct pmc_clk_data - common PMC clock configuration + * + * @base: PMC clock register base offset + * @clks: pointer to set of registered clocks, typically 0..5 + */ +struct pmc_clk_data { + void __iomem *base; + const struct pmc_clk *clks; +}; + +#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h new file mode 100644 index 000000000000..e4905fe69c38 --- /dev/null +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -0,0 +1,158 @@ +/* + * Intel Atom SOC Power Management Controller Header File + * Copyright (c) 2014, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef PMC_ATOM_H +#define PMC_ATOM_H + +/* ValleyView Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_VLV_PMC 0x0F1C +/* CherryTrail Power Control Unit PCI Device ID */ +#define PCI_DEVICE_ID_CHT_PMC 0x229C + +/* PMC Memory mapped IO registers */ +#define PMC_BASE_ADDR_OFFSET 0x44 +#define PMC_BASE_ADDR_MASK 0xFFFFFE00 +#define PMC_MMIO_REG_LEN 0x100 +#define PMC_REG_BIT_WIDTH 32 + +/* BIOS uses FUNC_DIS to disable specific function */ +#define PMC_FUNC_DIS 0x34 +#define PMC_FUNC_DIS_2 0x38 + +/* CHT specific bits in FUNC_DIS2 register */ +#define BIT_FD_GMM BIT(3) +#define BIT_FD_ISH BIT(4) + +/* S0ix wake event control */ +#define PMC_S0IX_WAKE_EN 0x3C + +#define BIT_LPC_CLOCK_RUN BIT(4) +#define BIT_SHARED_IRQ_GPSC BIT(5) +#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) +#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) +#define BIT_SHARED_IRQ_GPSS BIT(20) + +#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ + BIT_SHARED_IRQ_GPSC | \ + BIT_ORED_DEDICATED_IRQ_GPSS | \ + BIT_ORED_DEDICATED_IRQ_GPSC | \ + BIT_SHARED_IRQ_GPSS) + +/* The timers accumulate time spent in sleep state */ +#define PMC_S0IR_TMR 0x80 +#define PMC_S0I1_TMR 0x84 +#define PMC_S0I2_TMR 0x88 +#define PMC_S0I3_TMR 0x8C +#define PMC_S0_TMR 0x90 +/* Sleep state counter is in units of 32us */ +#define PMC_TMR_SHIFT 5 + +/* Power status of power islands */ +#define PMC_PSS 0x98 + +#define PMC_PSS_BIT_GBE BIT(0) +#define PMC_PSS_BIT_SATA BIT(1) +#define PMC_PSS_BIT_HDA BIT(2) +#define PMC_PSS_BIT_SEC BIT(3) +#define PMC_PSS_BIT_PCIE BIT(4) +#define PMC_PSS_BIT_LPSS BIT(5) +#define PMC_PSS_BIT_LPE BIT(6) +#define PMC_PSS_BIT_DFX BIT(7) +#define PMC_PSS_BIT_USH_CTRL BIT(8) +#define PMC_PSS_BIT_USH_SUS BIT(9) +#define PMC_PSS_BIT_USH_VCCS BIT(10) +#define PMC_PSS_BIT_USH_VCCA BIT(11) +#define PMC_PSS_BIT_OTG_CTRL BIT(12) +#define PMC_PSS_BIT_OTG_VCCS BIT(13) +#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) +#define PMC_PSS_BIT_OTG_VCCA BIT(15) +#define PMC_PSS_BIT_USB BIT(16) +#define PMC_PSS_BIT_USB_SUS BIT(17) + +/* CHT specific bits in PSS register */ +#define PMC_PSS_BIT_CHT_UFS BIT(7) +#define PMC_PSS_BIT_CHT_UXD BIT(11) +#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) +#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) +#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) +#define PMC_PSS_BIT_CHT_GMM BIT(17) +#define PMC_PSS_BIT_CHT_ISH BIT(18) +#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) +#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) + +/* These registers reflect D3 status of functions */ +#define PMC_D3_STS_0 0xA0 + +#define BIT_LPSS1_F0_DMA BIT(0) +#define BIT_LPSS1_F1_PWM1 BIT(1) +#define BIT_LPSS1_F2_PWM2 BIT(2) +#define BIT_LPSS1_F3_HSUART1 BIT(3) +#define BIT_LPSS1_F4_HSUART2 BIT(4) +#define BIT_LPSS1_F5_SPI BIT(5) +#define BIT_LPSS1_F6_XXX BIT(6) +#define BIT_LPSS1_F7_XXX BIT(7) +#define BIT_SCC_EMMC BIT(8) +#define BIT_SCC_SDIO BIT(9) +#define BIT_SCC_SDCARD BIT(10) +#define BIT_SCC_MIPI BIT(11) +#define
BIT_HDA BIT(12) +#define BIT_LPE BIT(13) +#define BIT_OTG BIT(14) +#define BIT_USH BIT(15) +#define BIT_GBE BIT(16) +#define BIT_SATA BIT(17) +#define BIT_USB_EHCI BIT(18) +#define BIT_SEC BIT(19) +#define BIT_PCIE_PORT0 BIT(20) +#define BIT_PCIE_PORT1 BIT(21) +#define BIT_PCIE_PORT2 BIT(22) +#define BIT_PCIE_PORT3 BIT(23) +#define BIT_LPSS2_F0_DMA BIT(24) +#define BIT_LPSS2_F1_I2C1 BIT(25) +#define BIT_LPSS2_F2_I2C2 BIT(26) +#define BIT_LPSS2_F3_I2C3 BIT(27) +#define BIT_LPSS2_F4_I2C4 BIT(28) +#define BIT_LPSS2_F5_I2C5 BIT(29) +#define BIT_LPSS2_F6_I2C6 BIT(30) +#define BIT_LPSS2_F7_I2C7 BIT(31) + +#define PMC_D3_STS_1 0xA4 +#define BIT_SMB BIT(0) +#define BIT_OTG_SS_PHY BIT(1) +#define BIT_USH_SS_PHY BIT(2) +#define BIT_DFX BIT(3) + +/* CHT specific bits in PMC_D3_STS_1 register */ +#define BIT_STS_GMM BIT(1) +#define BIT_STS_ISH BIT(2) + +/* PMC I/O Registers */ +#define ACPI_BASE_ADDR_OFFSET 0x40 +#define ACPI_BASE_ADDR_MASK 0xFFFFFE00 +#define ACPI_MMIO_REG_LEN 0x100 + +#define PM1_CNT 0x4 +#define SLEEP_TYPE_MASK 0xFFFFECFF +#define SLEEP_TYPE_S5 0x1C00 +#define SLEEP_ENABLE 0x2000 + +extern int pmc_atom_read(int offset, u32 *value); +extern int pmc_atom_write(int offset, u32 value); + +#endif /* PMC_ATOM_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index f926af41e122..a0894bc52bb4 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -64,24 +64,7 @@ typedef struct pm_message { } pm_message_t; /** - * struct dev_pm_ops - device PM callbacks - * - * Several device power state transitions are externally visible, affecting - * the state of pending I/O queues and (for drivers that touch hardware) - * interrupts, wakeups, DMA, and other hardware state. There may also be - * internal transitions to various low-power modes which are transparent - * to the rest of the driver stack (such as a driver that's ON gating off - * clocks which are not in active use). - * - * The externally visible transitions are handled with the help of callbacks - * included in this structure in such a way that two levels of callbacks are - * involved. First, the PM core executes callbacks provided by PM domains, - * device types, classes and bus types. They are the subsystem-level callbacks - * supposed to execute callbacks provided by device drivers, although they may - * choose not to do that. If the driver callbacks are executed, they have to - * collaborate with the subsystem-level callbacks to achieve the goals - * appropriate for the given system transition, given transition phase and the - * subsystem the device belongs to. + * struct dev_pm_ops - device PM callbacks. * * @prepare: The principal role of this callback is to prevent new children of * the device from being registered after it has returned (the driver's @@ -240,34 +223,6 @@ typedef struct pm_message { * driver's interrupt handler, which is guaranteed not to run while * @restore_noirq() is being executed. Analogous to @resume_noirq(). * - * All of the above callbacks, except for @complete(), return error codes. - * However, the error codes returned by the resume operations, @resume(), - * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do - * not cause the PM core to abort the resume transition during which they are - * returned. The error codes returned in those cases are only printed by the PM - * core to the system logs for debugging purposes. Still, it is recommended - * that drivers only return error codes from their resume methods in case of an - * unrecoverable failure (i.e. 
when the device being handled refuses to resume - * and becomes unusable) to allow us to modify the PM core in the future, so - * that it can avoid attempting to handle devices that failed to resume and - * their children. - * - * It is allowed to unregister devices while the above callbacks are being - * executed. However, a callback routine must NOT try to unregister the device - * it was called for, although it may unregister children of that device (for - * example, if it detects that a child was unplugged while the system was - * asleep). - * - * Refer to Documentation/power/admin-guide/devices.rst for more information about the role - * of the above callbacks in the system suspend process. - * - * There also are callbacks related to runtime power management of devices. - * Again, these callbacks are executed by the PM core only for subsystems - * (PM domains, device types, classes and bus types) and the subsystem-level - * callbacks are supposed to invoke the driver callbacks. Moreover, the exact - * actions to be performed by a device driver's callbacks generally depend on - * the platform and subsystem the device belongs to. - * * @runtime_suspend: Prepare the device for a condition in which it won't be * able to communicate with the CPU(s) and RAM due to power management. * This need not mean that the device should be put into a low-power state. @@ -287,11 +242,51 @@ typedef struct pm_message { * Check these conditions, and return 0 if it's appropriate to let the PM * core queue a suspend request for the device. * - * Refer to Documentation/power/runtime_pm.txt for more information about the - * role of the above callbacks in device runtime power management. + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that, typically, two levels of + * callbacks are involved. First, the PM core executes callbacks provided by PM + * domains, device types, classes and bus types. They are the subsystem-level + * callbacks expected to execute callbacks provided by device drivers, although + * they may choose not to do that. If the driver callbacks are executed, they + * have to collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by @resume(), @thaw(), @restore(), + * @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do not cause the PM + * core to abort the resume transition during which they are returned. The + * error codes returned in those cases are only printed to the system logs for + * debugging purposes. Still, it is recommended that drivers only return error + * codes from their resume methods in case of an unrecoverable failure (i.e. + * when the device being handled refuses to resume and becomes unusable) to + * allow the PM core to be modified in the future, so that it can avoid + * attempting to handle devices that failed to resume and their children. 
+ * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine MUST NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * There also are callbacks related to runtime power management of devices. + * Again, as a rule these callbacks are executed by the PM core for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are expected to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() + * callbacks in device runtime power management. */ - struct dev_pm_ops { int (*prepare)(struct device *dev); void (*complete)(struct device *dev); @@ -391,7 +386,7 @@ const struct dev_pm_ops name = { \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -/** +/* * PM_EVENT_ messages * * The following PM_EVENT_ messages are defined for the internal use of the PM @@ -487,7 +482,7 @@ const struct dev_pm_ops name = { \ #define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) -/** +/* * Device run-time power management status. * * These status labels are used internally by the PM core to indicate the @@ -517,7 +512,7 @@ enum rpm_status { RPM_SUSPENDING, }; -/** +/* * Device run-time power management request types. * * RPM_REQ_NONE Do nothing. @@ -616,15 +611,18 @@ extern void update_pm_runtime_accounting(struct device *dev); extern int dev_pm_get_subsys_data(struct device *dev); extern void dev_pm_put_subsys_data(struct device *dev); -/* - * Power domains provide callbacks that are executed during system suspend, - * hibernation, system resume and during runtime PM transitions along with - * subsystem-level and driver-level callbacks. +/** + * struct dev_pm_domain - power management domain representation. * + * @ops: Power management operations associated with this domain. * @detach: Called when removing a device from the domain. * @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. + * + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions instead of + * subsystem-level and driver-level callbacks. 
*/ struct dev_pm_domain { struct dev_pm_ops ops; diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index d4d34791e463..032b55909145 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier); int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier); -int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); -int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -172,6 +170,12 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return dev->power.qos->flags_req->data.flr.flags; } + +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +{ + return IS_ERR_OR_NULL(dev->power.qos) ? + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -199,12 +203,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev, static inline int dev_pm_qos_remove_notifier(struct device *dev, struct notifier_block *notifier) { return 0; } -static inline int dev_pm_qos_add_global_notifier( - struct notifier_block *notifier) - { return 0; } -static inline int dev_pm_qos_remove_global_notifier( - struct notifier_block *notifier) - { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { dev->power.power_state = PMSG_ON; @@ -236,6 +234,7 @@ static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } +static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; } #endif #endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7eeceac52dea..cae461224948 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -55,6 +55,27 @@ /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + +/* + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET + * + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). 
+ */ +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) + /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h new file mode 100644 index 000000000000..14ec4f567342 --- /dev/null +++ b/include/linux/prime_numbers.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_PRIME_NUMBERS_H +#define __LINUX_PRIME_NUMBERS_H + +#include <linux/types.h> + +bool is_prime_number(unsigned long x); +unsigned long next_prime_number(unsigned long x); + +/** + * for_each_prime_number - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @max: the upper limit + * + * Starting from the first prime number 2 iterate over each prime number up to + * the @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX to ensure termination. To begin with + * @prime set to 1 on the first iteration, use for_each_prime_number_from() + * instead. + */ +#define for_each_prime_number(prime, max) \ + for_each_prime_number_from((prime), 2, (max)) + +/** + * for_each_prime_number_from - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @from: the initial value + * @max: the upper limit + * + * Starting from @from iterate over each successive prime number up to the + * @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX, and @from less than @max, to ensure + * termination. + */ +#define for_each_prime_number_from(prime, from, max) \ + for (prime = (from); prime <= (max); prime = next_prime_number(prime)) + +#endif /* !__LINUX_PRIME_NUMBERS_H */ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index e0e539321ab9..422bc2e4cb6a 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -3,6 +3,7 @@ #include <linux/compiler.h> /* For unlikely. */ #include <linux/sched.h> /* For struct task_struct. */ +#include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */ #include <linux/err.h> /* for IS_ERR_VALUE */ #include <linux/bug.h> /* For BUG_ON. */ #include <linux/pid_namespace.h> /* For task_active_pid_ns.
*/ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2c6c5114c089..08fad7c6a471 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -287,8 +287,6 @@ struct pwm_ops { * @pwms: array of PWM devices allocated by the framework * @of_xlate: request a PWM device given a device tree PWM specifier * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier - * @can_sleep: must be true if the .config(), .enable() or .disable() - * operations may sleep */ struct pwm_chip { struct device *dev; @@ -302,7 +300,6 @@ struct pwm_chip { struct pwm_device * (*of_xlate)(struct pwm_chip *pc, const struct of_phandle_args *args); unsigned int of_pwm_n_cells; - bool can_sleep; }; /** @@ -451,8 +448,6 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); - -bool pwm_can_sleep(struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { @@ -566,11 +561,6 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } - -static inline bool pwm_can_sleep(struct pwm_device *pwm) -{ - return false; -} #endif static inline void pwm_apply_args(struct pwm_device *pwm) @@ -613,18 +603,25 @@ struct pwm_lookup { const char *con_id; unsigned int period; enum pwm_polarity polarity; + const char *module; /* optional, may be NULL */ }; -#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - .period = _period, \ - .polarity = _polarity \ +#define PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, \ + _period, _polarity, _module) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + .period = _period, \ + .polarity = _polarity, \ + .module = _module, \ } +#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \ + PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \ + _polarity, NULL) + #if IS_ENABLED(CONFIG_PWM) void pwm_add_table(struct pwm_lookup *table, size_t num); void pwm_remove_table(struct pwm_lookup *table, size_t num); diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index cc32ab852fbc..d32f6f1a5225 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -13,9 +13,9 @@ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); - +#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) +#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 +#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 #define QCOM_SCM_HDCP_MAX_REQ_CNT 5 struct qcom_scm_hdcp_req { @@ -23,27 +23,49 @@ struct qcom_scm_hdcp_req { u32 val; }; +#if IS_ENABLED(CONFIG_QCOM_SCM) +extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); extern bool qcom_scm_is_available(void); - extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); - + u32 *resp); extern bool qcom_scm_pas_supported(u32 peripheral); extern int qcom_scm_pas_init_image(u32 peripheral, const 
void *metadata, - size_t size); + size_t size); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, - phys_addr_t size); + phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); - -#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 -#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 - extern void qcom_scm_cpu_power_down(u32 flags); - -#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) - extern u32 qcom_scm_get_version(void); - +extern int qcom_scm_set_remote_state(u32 state, u32 id); +#else +static inline +int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline +int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +{ + return -ENODEV; +} +static inline bool qcom_scm_is_available(void) { return false; } +static inline bool qcom_scm_hdcp_available(void) { return false; } +static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, + u32 *resp) { return -ENODEV; } +static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; } +static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, + size_t size) { return -ENODEV; } +static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, + phys_addr_t size) { return -ENODEV; } +static inline int +qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } +static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } +static inline void qcom_scm_cpu_power_down(u32 flags) {} +static inline u32 qcom_scm_get_version(void) { return 0; } +static inline int +qcom_scm_set_remote_state(u32 state, u32 id) { return -ENODEV; } +#endif #endif diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52bda854593b..3e5735064b71 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -22,11 +22,13 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/preempt.h> -#include <linux/types.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/preempt.h> #include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/types.h> /* * The bottom two bits of the slot determine how the remaining bits in the @@ -94,7 +96,7 @@ struct radix_tree_node { unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ struct radix_tree_node *parent; /* Used when ascending tree */ - void *private_data; /* For tree user */ + struct radix_tree_root *root; /* The tree we belong to */ union { struct list_head private_list; /* For tree user */ struct rcu_head rcu_head; /* Used when freeing node */ @@ -103,7 +105,10 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ +/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ +#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) + struct radix_tree_root { gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; @@ -123,7 +128,7 @@ do { \ (root)->rnode = NULL; \ } while (0) -static inline bool radix_tree_empty(struct radix_tree_root *root) +static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->rnode == NULL; } @@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) */ /** - * radix_tree_deref_slot - dereference
a slot - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. + * radix_tree_deref_slot - dereference a slot + * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is @@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot(void **pslot) +static inline void *radix_tree_deref_slot(void __rcu **slot) { - return rcu_dereference(*pslot); + return rcu_dereference(*slot); } /** - * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held - * @pslot: pointer to slot, returned by radix_tree_lookup_slot - * Returns: item that was stored in that slot with any direct pointer flag - * removed. - * - * Similar to radix_tree_deref_slot but only used during migration when a pages - * mapping is being moved. The caller does not hold the RCU read lock but it - * must hold the tree lock to prevent parallel updates. + * radix_tree_deref_slot_protected - dereference a slot with tree lock held + * @slot: slot pointer, returned by radix_tree_lookup_slot + * + * Similar to radix_tree_deref_slot. The caller does not hold the RCU read + * lock but it must hold the tree lock to prevent parallel updates. + * + * Return: entry stored in that slot. */ -static inline void *radix_tree_deref_slot_protected(void **pslot, +static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { - return rcu_dereference_protected(*pslot, lockdep_is_held(treelock)); + return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** @@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *root, unsigned long index, +int __radix_tree_create(struct radix_tree_root *, unsigned long index, unsigned order, struct radix_tree_node **nodep, - void ***slotp); + void __rcu ***slotp); int __radix_tree_insert(struct radix_tree_root *, unsigned long index, unsigned order, void *); static inline int radix_tree_insert(struct radix_tree_root *root, @@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root, { return __radix_tree_insert(root, index, 0, entry); } -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, + struct radix_tree_node **nodep, void __rcu ***slotp); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, + unsigned long index); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); -void __radix_tree_replace(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, +void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot, void *entry, radix_tree_update_node_t 
update_node, void *private); void radix_tree_iter_replace(struct radix_tree_root *, - const struct radix_tree_iter *, void **slot, void *item); -void radix_tree_replace_slot(struct radix_tree_root *root, - void **slot, void *item); -void __radix_tree_delete_node(struct radix_tree_root *root, - struct radix_tree_node *node, + const struct radix_tree_iter *, void __rcu **slot, void *entry); +void radix_tree_replace_slot(struct radix_tree_root *, + void __rcu **slot, void *entry); +void __radix_tree_delete_node(struct radix_tree_root *, + struct radix_tree_node *, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot); -unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, +void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, + void __rcu **slot); +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); -void *radix_tree_tag_set(struct radix_tree_root *root, +void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); -void *radix_tree_tag_clear(struct radix_tree_root *root, +void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *root, +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag); -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, + void **results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, + void __rcu ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag); +int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { @@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, 
void *); +void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *, + gfp_t, int end); -#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ -#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ -#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; /** * radix_tree_iter_init - initialize radix tree iterator @@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index, * @start: iteration starting index * Returns: NULL */ -static __always_inline void ** +static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* @@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. + */ +static inline void __rcu ** +radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. + */ +static inline void __rcu ** +radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * @@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * and continue the iteration. */ static inline __must_check -void **radix_tree_iter_retry(struct radix_tree_iter *iter) +void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; @@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. 
*/ -void **__must_check radix_tree_iter_resume(void **slot, +void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** @@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter) } #ifdef CONFIG_RADIX_TREE_MULTIORDER -void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, - unsigned flags); +void __rcu **__radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags); #else /* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void ** __radix_tree_next_slot(void **slot, +static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { return slot; @@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot, * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. */ -static __always_inline void ** -radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) +static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, + struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; @@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) return NULL; found: - if (unlikely(radix_tree_is_internal_node(*slot))) + if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) return __radix_tree_next_slot(slot, iter, flags); return slot; } diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d076183e49be..9702b6e183bc 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ old->rbaugmented = rbcompute(old); \ } \ rbstatic const struct rb_augment_callbacks rbname = { \ - rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ + .propagate = rbname ## _propagate, \ + .copy = rbname ## _copy, \ + .rotate = rbname ## _rotate \ }; diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6ade6a52d9d4..de88b33c0974 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -40,7 +40,6 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> -#include <linux/completion.h> #include <linux/debugobjects.h> #include <linux/bug.h> #include <linux/compiler.h> @@ -226,45 +225,6 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); -/* - * Structure allowing asynchronous waiting on RCU. - */ -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; -}; -void wakeme_after_rcu(struct rcu_head *head); - -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, - struct rcu_synchronize *rs_array); - -#define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ -} while (0) - -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) - -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for the flavors to wait on. - * - * This macro waits concurrently for multiple flavors of RCU grace periods. 
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called - * is automatically a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - /** * call_rcu_tasks() - Queue an RCU for invocation task-based grace period * @head: structure to be used for queueing the RCU updates. diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h new file mode 100644 index 000000000000..e774b4f5f220 --- /dev/null +++ b/include/linux/rcupdate_wait.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H +#define _LINUX_SCHED_RCUPDATE_WAIT_H + +/* + * RCU synchronization types and methods: + */ + +#include <linux/rcupdate.h> +#include <linux/completion.h> + +/* + * Structure allowing asynchronous waiting on RCU. + */ +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; +void wakeme_after_rcu(struct rcu_head *head); + +void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, + struct rcu_synchronize *rs_array); + +#define _wait_rcu_gp(checktiny, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ +} while (0) + +#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) + +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for the flavors to wait on. + * + * This macro waits concurrently for multiple flavors of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait + * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. + * + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU + * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * is automatically a grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + +#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4f9b2fa2173d..b452953e21c8 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -53,15 +53,8 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -static inline void rcu_barrier_bh(void) -{ - wait_rcu_gp(call_rcu_bh); -} - -static inline void rcu_barrier_sched(void) -{ - wait_rcu_gp(call_rcu_sched); -} +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); static inline void synchronize_rcu_expedited(void) { diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 600aadf9cca4..0023fee4bbbc 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -1,54 +1,10 @@ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H -/* - * Variant of atomic_t specialized for reference counts.
- * - * The interface matches the atomic_t interface (to aid in porting) but only - * provides the few functions one should use for reference counting. - * - * It differs in that the counter saturates at UINT_MAX and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free issues. - * - * Memory ordering rules are slightly relaxed wrt regular atomic_t functions - * and provide only what is strictly required for refcounts. - * - * The increments are fully relaxed; these will not provide ordering. The - * rationale is that whatever is used to obtain the object we're increasing the - * reference count on will provide the ordering. For locked data structures, - * its the lock acquire, for RCU/lockless data structures its the dependent - * load. - * - * Do note that inc_not_zero() provides a control dependency which will order - * future stores against the inc, this ensures we'll never modify the object - * if we did not in fact acquire a reference. - * - * The decrements will provide release order, such that all the prior loads and - * stores will be issued before, it also provides a control dependency, which - * will order us against the subsequent free(). - * - * The control dependency is against the load of the cmpxchg (ll/sc) that - * succeeded. This means the stores aren't fully ordered, but this is fine - * because the 1->0 transition indicates no concurrency. - * - * Note that the allocator is responsible for ordering things between free() - * and alloc(). - * - */ - #include <linux/atomic.h> -#include <linux/bug.h> #include <linux/mutex.h> #include <linux/spinlock.h> - -#ifdef CONFIG_DEBUG_REFCOUNT -#define REFCOUNT_WARN(cond, str) WARN_ON(cond) -#define __refcount_check __must_check -#else -#define REFCOUNT_WARN(cond, str) (void)(cond) -#define __refcount_check -#endif +#include <linux/kernel.h> typedef struct refcount_struct { atomic_t refs; @@ -66,229 +22,21 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -static inline __refcount_check -bool refcount_add_not_zero(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (!val) - return false; - - if (unlikely(val == UINT_MAX)) - return true; - - new = val + i; - if (new < val) - new = UINT_MAX; - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -static inline void refcount_add(unsigned int i, refcount_t *r) -{ - REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - */ -static inline __refcount_check -bool refcount_inc_not_zero(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - new = val + 1; - - if (!val) - return false; - - if (unlikely(!new)) - return true; - - old = atomic_cmpxchg_relaxed(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); - - return true; -} - -/* - * Similar to atomic_inc(), will saturate at UINT_MAX and WARN. 
- * - * Provides no memory ordering, it is assumed the caller already has a - * reference on the object, will WARN when this is not so. - */ -static inline void refcount_inc(refcount_t *r) -{ - REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); -} - -/* - * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_sub_and_test(unsigned int i, refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); - - for (;;) { - if (unlikely(val == UINT_MAX)) - return false; - - new = val - i; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return false; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return !new; -} - -static inline __refcount_check -bool refcount_dec_and_test(refcount_t *r) -{ - return refcount_sub_and_test(1, r); -} +extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); +extern void refcount_add(unsigned int i, refcount_t *r); -/* - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. - */ -static inline -void refcount_dec(refcount_t *r) -{ - REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); -} - -/* - * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the - * success thereof. - * - * Like all decrement operations, it provides release memory order and provides - * a control dependency. - * - * It can be used like a try-delete operator; this explicit case is provided - * and not cmpxchg in generic, because that would allow implementing unsafe - * operations. - */ -static inline __refcount_check -bool refcount_dec_if_one(refcount_t *r) -{ - return atomic_cmpxchg_release(&r->refs, 1, 0) == 1; -} - -/* - * No atomic_t counterpart, it decrements unless the value is 1, in which case - * it will return false. - * - * Was often done like: atomic_add_unless(&var, -1, 1) - */ -static inline __refcount_check -bool refcount_dec_not_one(refcount_t *r) -{ - unsigned int old, new, val = atomic_read(&r->refs); +extern __must_check bool refcount_inc_not_zero(refcount_t *r); +extern void refcount_inc(refcount_t *r); - for (;;) { - if (unlikely(val == UINT_MAX)) - return true; +extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); +extern void refcount_sub(unsigned int i, refcount_t *r); - if (val == 1) - return false; +extern __must_check bool refcount_dec_and_test(refcount_t *r); +extern void refcount_dec(refcount_t *r); - new = val - 1; - if (new > val) { - REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n"); - return true; - } - - old = atomic_cmpxchg_release(&r->refs, val, new); - if (old == val) - break; - - val = old; - } - - return true; -} - -/* - * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail - * to decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. 
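For orientation, the lookup/release pattern these refcount_t helpers are meant for looks roughly like this (struct foo, foo_get() and foo_put() are illustrative names, not from this patch; kfree() assumes <linux/slab.h>):

struct foo {
	refcount_t ref;
	/* ... payload ... */
};

/* Take a reference only if the object is still live (e.g. under RCU): */
static struct foo *foo_get(struct foo *f)
{
	return refcount_inc_not_zero(&f->ref) ? f : NULL;
}

/* Drop a reference; only the 1->0 transition frees the object: */
static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}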
- */ -static inline __refcount_check -bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - mutex_lock(lock); - if (!refcount_dec_and_test(r)) { - mutex_unlock(lock); - return false; - } - - return true; -} - -/* - * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to - * decrement when saturated at UINT_MAX. - * - * Provides release memory ordering, such that prior loads and stores are done - * before, and provides a control dependency such that free() must come after. - * See the comment on top. - */ -static inline __refcount_check -bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) -{ - if (refcount_dec_not_one(r)) - return false; - - spin_lock(lock); - if (!refcount_dec_and_test(r)) { - spin_unlock(lock); - return false; - } - - return true; -} +extern __must_check bool refcount_dec_if_one(refcount_t *r); +extern __must_check bool refcount_dec_not_one(refcount_t *r); +extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); +extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 8265d351c9f0..81da49564ff4 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -346,6 +346,7 @@ struct rproc_ops { * a message. * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery + * @RPROC_DELETED: device is deleted + * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -359,7 +360,8 @@ enum rproc_state { RPROC_SUSPENDED = 1, RPROC_RUNNING = 2, RPROC_CRASHED = 3, - RPROC_LAST = 4, + RPROC_DELETED = 4, + RPROC_LAST = 5, }; /** @@ -397,7 +399,6 @@ enum rproc_crash_type { * @num_traces: number of trace buffers * @carveouts: list of physically contiguous memory allocations * @mappings: list of iommu mappings we initiated, needed on shutdown - * @firmware_loading_complete: marks e/o asynchronous firmware loading * @bootaddr: address of first instruction to boot rproc with (optional) * @rvdevs: list of remote virtio devices * @subdevs: list of subdevices, following the running state @@ -429,7 +430,6 @@ struct rproc { int num_traces; struct list_head carveouts; struct list_head mappings; - struct completion firmware_loading_complete; u32 bootaddr; struct list_head rvdevs; struct list_head subdevs; diff --git a/include/linux/reservation.h b/include/linux/reservation.h index d9706a6f5ae2..2b5a4679daea 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -145,6 +145,40 @@ reservation_object_get_list(struct reservation_object *obj) } /** + * reservation_object_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. Note + * that the lock is only against other writers; readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx.
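A rough usage sketch of the two lock helpers added here (obj_a and obj_b are illustrative; the full -EDEADLK backoff-and-retry dance is omitted):

struct ww_acquire_ctx ctx;

ww_acquire_init(&ctx, &reservation_ww_class);
if (reservation_object_lock(obj_a, &ctx) ||
    reservation_object_lock(obj_b, &ctx)) {
	/* -EDEADLK: drop everything taken so far, then retry */
}
ww_acquire_done(&ctx);
/* ... add or replace fences ... */
reservation_object_unlock(obj_b);
reservation_object_unlock(obj_a);
ww_acquire_fini(&ctx);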
+ */ +static inline int +reservation_object_lock(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * reservation_object_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access. + */ +static inline void +reservation_object_unlock(struct reservation_object *obj) +{ + ww_mutex_unlock(&obj->lock); +} + +/** * reservation_object_get_excl - get the reservation object's * exclusive fence, with update-side lock held * @obj: the reservation object diff --git a/include/linux/reset.h b/include/linux/reset.h index 5daff15722d3..86b4ed75359e 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -13,10 +13,12 @@ int reset_control_deassert(struct reset_control *rstc); int reset_control_status(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, - const char *id, int index, int shared); + const char *id, int index, bool shared, + bool optional); void reset_control_put(struct reset_control *rstc); struct reset_control *__devm_reset_control_get(struct device *dev, - const char *id, int index, int shared); + const char *id, int index, bool shared, + bool optional); int __must_check device_reset(struct device *dev); @@ -69,14 +71,15 @@ static inline int device_reset_optional(struct device *dev) static inline struct reset_control *__of_reset_control_get( struct device_node *node, - const char *id, int index, int shared) + const char *id, int index, bool shared, + bool optional) { return ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__devm_reset_control_get( - struct device *dev, - const char *id, int index, int shared) + struct device *dev, const char *id, + int index, bool shared, bool optional) { return ERR_PTR(-ENOTSUPP); } @@ -104,7 +107,8 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, + false); } /** @@ -132,19 +136,22 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, + false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); + return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, + true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); + return __of_reset_control_get(dev ? 
dev->of_node : NULL, id, 0, true, + true); } /** @@ -160,7 +167,7 @@ static inline struct reset_control *reset_control_get_optional_shared( static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, 0); + return __of_reset_control_get(node, id, 0, false, false); } /** @@ -185,7 +192,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, 1); + return __of_reset_control_get(node, id, 0, true, false); } /** @@ -202,7 +209,7 @@ static inline struct reset_control *of_reset_control_get_shared( static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, 0); + return __of_reset_control_get(node, NULL, index, false, false); } /** @@ -230,7 +237,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, 1); + return __of_reset_control_get(node, NULL, index, true, false); } /** @@ -252,7 +259,7 @@ __must_check devm_reset_control_get_exclusive(struct device *dev, #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __devm_reset_control_get(dev, id, 0, 0); + return __devm_reset_control_get(dev, id, 0, false, false); } /** @@ -267,19 +274,19 @@ __must_check devm_reset_control_get_exclusive(struct device *dev, static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 1); + return __devm_reset_control_get(dev, id, 0, true, false); } static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 0); + return __devm_reset_control_get(dev, id, 0, false, true); } static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, 1); + return __devm_reset_control_get(dev, id, 0, true, true); } /** @@ -297,7 +304,7 @@ static inline struct reset_control *devm_reset_control_get_optional_shared( static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, 0); + return __devm_reset_control_get(dev, NULL, index, false, false); } /** @@ -313,7 +320,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, 1); + return __devm_reset_control_get(dev, NULL, index, true, false); } /* diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index f2e12a845910..092292b6675e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -25,7 +25,7 @@ #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> -#include <linux/rcupdate.h> +#include <linux/rculist.h> /* * The end of the chain is marked with a special nulls marks which has diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 15321fb1df6b..8c89e902df3e 100644 --- 
a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/rwsem.h> #include <linux/memcontrol.h> +#include <linux/highmem.h> /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); -/* - * Used by uprobes to replace a userspace page safely - */ -pte_t *__page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **, int); - -static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, - spinlock_t **ptlp, int sync) -{ - pte_t *ptep; +/* Avoid racy checks */ +#define PVMW_SYNC (1 << 0) +/* Look for migration entries rather than present PTEs */ +#define PVMW_MIGRATION (1 << 1) - __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, - ptlp, sync)); - return ptep; -} +struct page_vma_mapped_walk { + struct page *page; + struct vm_area_struct *vma; + unsigned long address; + pmd_t *pmd; + pte_t *pte; + spinlock_t *ptl; + unsigned int flags; +}; -/* - * Used by idle page tracking to check if a page was referenced via page - * tables. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, - unsigned long address, pmd_t **pmdp, - pte_t **ptep, spinlock_t **ptlp); -#else -static inline bool page_check_address_transhuge(struct page *page, - struct mm_struct *mm, unsigned long address, - pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) +static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - *ptep = page_check_address(page, mm, address, ptlp, 0); - *pmdp = NULL; - return !!*ptep; + if (pvmw->pte) + pte_unmap(pvmw->pte); + if (pvmw->ptl) + spin_unlock(pvmw->ptl); } -#endif + +bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); /* * Used by swapoff to help locate where page is expected in vma. diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h new file mode 100644 index 000000000000..ea05f6c51413 --- /dev/null +++ b/include/linux/rodata_test.h @@ -0,0 +1,23 @@ +/* + * rodata_test.h: functional test for mark_rodata_ro function + * + * (C) Copyright 2008 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License.
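The new page_vma_mapped_walk() API above is typically driven in a loop like the following (page, vma and address are assumed inputs):

struct page_vma_mapped_walk pvmw = {
	.page = page,
	.vma = vma,
	.address = address,
};

while (page_vma_mapped_walk(&pvmw)) {
	/* pvmw.pte (or pvmw.pmd for a huge mapping) points at one
	 * mapping of page, with pvmw.ptl held; to stop early, call
	 * page_vma_mapped_walk_done(&pvmw) and break. */
}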
+ */ + +#ifndef _RODATA_TEST_H +#define _RODATA_TEST_H + +#ifdef CONFIG_DEBUG_RODATA_TEST +extern const int rodata_test_data; +void rodata_test(void); +#else +static inline void rodata_test(void) {} +#endif + +#endif /* _RODATA_TEST_H */ diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 18f9e1ae4b7e..10d6ae8bbb7d 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -41,6 +41,7 @@ #include <linux/mod_devicetable.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/poll.h> #define RPMSG_ADDR_ANY 0xFFFFFFFF @@ -156,6 +157,9 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len); +unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait); + #else static inline int register_rpmsg_device(struct rpmsg_device *dev) @@ -254,6 +258,15 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, return -ENXIO; } +static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, + struct file *filp, poll_table *wait) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return 0; +} + #endif /* IS_ENABLED(CONFIG_RPMSG) */ /* use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h index e674b2e3074b..8ec8b6439b25 100644 --- a/include/linux/rpmsg/qcom_smd.h +++ b/include/linux/rpmsg/qcom_smd.h @@ -18,14 +18,12 @@ static inline struct qcom_smd_edge * qcom_smd_register_edge(struct device *parent, struct device_node *node) { - return ERR_PTR(-ENXIO); + return NULL; } static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) { - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; + return 0; } #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index c8e519d0b4a3..d67eee84fd43 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,197 +1,57 @@ #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H -#include <uapi/linux/sched.h> - -#include <linux/sched/prio.h> - - -struct sched_param { - int sched_priority; -}; - -#include <asm/param.h> /* for HZ */ +/* + * Define 'struct task_struct' and provide the main scheduler + * APIs (schedule(), wakeup variants, etc.) 
+ */ -#include <linux/capability.h> -#include <linux/threads.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timex.h> -#include <linux/jiffies.h> -#include <linux/plist.h> -#include <linux/rbtree.h> -#include <linux/thread_info.h> -#include <linux/cpumask.h> -#include <linux/errno.h> -#include <linux/nodemask.h> -#include <linux/mm_types.h> -#include <linux/preempt.h> +#include <uapi/linux/sched.h> -#include <asm/page.h> -#include <asm/ptrace.h> +#include <asm/current.h> -#include <linux/smp.h> +#include <linux/pid.h> #include <linux/sem.h> #include <linux/shm.h> -#include <linux/signal.h> -#include <linux/compiler.h> -#include <linux/completion.h> -#include <linux/pid.h> -#include <linux/percpu.h> -#include <linux/topology.h> +#include <linux/kcov.h> +#include <linux/mutex.h> +#include <linux/plist.h> +#include <linux/hrtimer.h> #include <linux/seccomp.h> +#include <linux/nodemask.h> #include <linux/rcupdate.h> -#include <linux/rculist.h> -#include <linux/rtmutex.h> - -#include <linux/time.h> -#include <linux/param.h> #include <linux/resource.h> -#include <linux/timer.h> -#include <linux/hrtimer.h> -#include <linux/kcov.h> -#include <linux/task_io_accounting.h> #include <linux/latencytop.h> -#include <linux/cred.h> -#include <linux/llist.h> -#include <linux/uidgid.h> -#include <linux/gfp.h> -#include <linux/magic.h> -#include <linux/cgroup-defs.h> - -#include <asm/processor.h> - -#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ - -/* - * Extended scheduling parameters data structure. - * - * This is needed because the original struct sched_param can not be - * altered without introducing ABI issues with legacy applications - * (e.g., in sched_getparam()). - * - * However, the possibility of specifying more than just a priority for - * the tasks may be useful for a wide variety of application fields, e.g., - * multimedia, streaming, automation and control, and many others. - * - * This variant (sched_attr) is meant at describing a so-called - * sporadic time-constrained task. In such model a task is specified by: - * - the activation period or minimum instance inter-arrival time; - * - the maximum (or average, depending on the actual scheduling - * discipline) computation time of all instances, a.k.a. runtime; - * - the deadline (relative to the actual activation time) of each - * instance. - * Very briefly, a periodic (sporadic) task asks for the execution of - * some specific computation --which is typically called an instance-- - * (at most) every period. Moreover, each instance typically lasts no more - * than the runtime and must be completed by time instant t equal to - * the instance activation time + the deadline. - * - * This is reflected by the actual fields of the sched_attr structure: - * - * @size size of the structure, for fwd/bwd compat. - * - * @sched_policy task's scheduling policy - * @sched_flags for customizing the scheduler behaviour - * @sched_nice task's nice value (SCHED_NORMAL/BATCH) - * @sched_priority task's static priority (SCHED_FIFO/RR) - * @sched_deadline representative of the task's deadline - * @sched_runtime representative of the task's runtime - * @sched_period representative of the task's period - * - * Given this task model, there are a multiplicity of scheduling algorithms - * and policies, that can be used to ensure all the tasks will make their - * timing constraints. - * - * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the - * only user of this new interface. 
More information about the algorithm - * available in the scheduling class file or in Documentation/. - */ -struct sched_attr { - u32 size; - - u32 sched_policy; - u64 sched_flags; - - /* SCHED_NORMAL, SCHED_BATCH */ - s32 sched_nice; - - /* SCHED_FIFO, SCHED_RR */ - u32 sched_priority; - - /* SCHED_DEADLINE */ - u64 sched_runtime; - u64 sched_deadline; - u64 sched_period; -}; +#include <linux/sched/prio.h> +#include <linux/signal_types.h> +#include <linux/mm_types_task.h> +#include <linux/task_io_accounting.h> -struct futex_pi_state; -struct robust_list_head; +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct backing_dev_info; struct bio_list; -struct fs_struct; -struct perf_event_context; struct blk_plug; -struct filename; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; struct nameidata; - -#define VMACACHE_BITS 2 -#define VMACACHE_SIZE (1U << VMACACHE_BITS) -#define VMACACHE_MASK (VMACACHE_SIZE - 1) - -/* - * These are the constant used to fake the fixed-point load-average - * counting. Some notes: - * - 11 bit fractions expand to 22 bits by the multiplies: this gives - * a load-average precision of 10 bits integer + 11 bits fractional - * - if you want to count load-averages more often, you need more - * precision, or rounding will get you. With 2-second counting freq, - * the EXP_n values would be 1981, 2034 and 2043 if still using only - * 11 bit fractions. - */ -extern unsigned long avenrun[]; /* Load averages */ -extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); - -#define FSHIFT 11 /* nr of bits of precision */ -#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ -#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ -#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ -#define EXP_5 2014 /* 1/exp(5sec/5min) */ -#define EXP_15 2037 /* 1/exp(5sec/15min) */ - -#define CALC_LOAD(load,exp,n) \ - load *= exp; \ - load += n*(FIXED_1-exp); \ - load >>= FSHIFT; - -extern unsigned long total_forks; -extern int nr_threads; -DECLARE_PER_CPU(unsigned long, process_counts); -extern int nr_processes(void); -extern unsigned long nr_running(void); -extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); - -extern void calc_global_load(unsigned long ticks); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -extern void dump_cpu_task(int cpu); - +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct sched_attr; +struct sched_param; struct seq_file; -struct cfs_rq; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; struct task_group; -#ifdef CONFIG_SCHED_DEBUG -extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); -extern void proc_sched_set_task(struct task_struct *p); -#endif /* * Task state bitmask. NOTE! These bits are also @@ -203,53 +63,53 @@ extern void proc_sched_set_task(struct task_struct *p); * modifying one set can't modify the other one by * mistake. 
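To make the sporadic-task model documented for sched_attr above concrete, a SCHED_DEADLINE reservation of 10ms runtime and a 30ms deadline every 100ms period would be filled in as follows (values are in nanoseconds; a userspace caller would pass this to the sched_setattr() syscall):

struct sched_attr attr = {
	.size		= sizeof(attr),
	.sched_policy	= SCHED_DEADLINE,
	.sched_runtime	=  10 * 1000 * 1000,	/* 10 ms worst-case runtime */
	.sched_deadline	=  30 * 1000 * 1000,	/* 30 ms relative deadline */
	.sched_period	= 100 * 1000 * 1000,	/* 100 ms activation period */
};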
*/ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 -/* in tsk->exit_state */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_NOLOAD 1024 -#define TASK_NEW 2048 -#define TASK_STATE_MAX 4096 - -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" - -extern char ___assert_task_state[1 - 2*!!( - sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; - -/* Convenience macros for the sake of set_current_state */ -#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) -#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) - -#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) - -/* Convenience macros for the sake of wake_up */ -#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) - -/* get_task_state() */ -#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) -#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) + +/* Used in tsk->state: */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* Used in tsk->exit_state: */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* Used in tsk->state again: */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_NEW 2048 +#define TASK_STATE_MAX 4096 + +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" + +/* Convenience macros for the sake of set_current_state: */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up(): */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state(): */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + +#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + +#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + +#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ + (task->state & TASK_NOLOAD) == 0) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP @@ -299,139 +159,24 @@ extern char ___assert_task_state[1 - 2*!!( * * Also see the comments of try_to_wake_up(). 
*/ -#define __set_current_state(state_value) \ - do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) - -#endif - -/* Task command name length */ -#define TASK_COMM_LEN 16 - -#include <linux/spinlock.h> - -/* - * This serializes "schedule()" and also protects - * the run-queue from deletions/modifications (but - * _adding_ to the beginning of the run-queue has - * a separate lock). - */ -extern rwlock_t tasklist_lock; -extern spinlock_t mmlist_lock; - -struct task_struct; - -#ifdef CONFIG_PROVE_RCU -extern int lockdep_tasklist_lock_is_held(void); -#endif /* #ifdef CONFIG_PROVE_RCU */ - -extern void sched_init(void); -extern void sched_init_smp(void); -extern asmlinkage void schedule_tail(struct task_struct *prev); -extern void init_idle(struct task_struct *idle, int cpu); -extern void init_idle_bootup_task(struct task_struct *idle); - -extern cpumask_var_t cpu_isolated_map; - -extern int runqueue_is_locked(int cpu); - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void nohz_balance_enter_idle(int cpu); -extern void set_cpu_sd_state_idle(void); -extern int get_nohz_timer_target(void); -#else -static inline void nohz_balance_enter_idle(int cpu) { } -static inline void set_cpu_sd_state_idle(void) { } +#define __set_current_state(state_value) do { current->state = (state_value); } while (0) +#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) #endif -/* - * Only dump TASK_* tasks. (0 for all tasks) - */ -extern void show_state_filter(unsigned long state_filter); - -static inline void show_state(void) -{ - show_state_filter(0); -} - -extern void show_regs(struct pt_regs *); +/* Task command name length: */ +#define TASK_COMM_LEN 16 -/* - * TASK is a pointer to the task whose backtrace we want to see (or NULL for current - * task), SP is the stack pointer of the first frame that should be shown in the back - * trace (or NULL if the entire call-chain of the task should be shown). 
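The canonical sleep/wakeup pattern the set_current_state() comment above describes, with condition standing in for the caller's wakeup predicate:

for (;;) {
	set_current_state(TASK_INTERRUPTIBLE);
	if (condition)			/* condition: assumed predicate */
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);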
- */ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern cpumask_var_t cpu_isolated_map; -extern void cpu_init (void); -extern void trap_init(void); -extern void update_process_times(int user); extern void scheduler_tick(void); -extern int sched_cpu_starting(unsigned int cpu); -extern int sched_cpu_activate(unsigned int cpu); -extern int sched_cpu_deactivate(unsigned int cpu); -#ifdef CONFIG_HOTPLUG_CPU -extern int sched_cpu_dying(unsigned int cpu); -#else -# define sched_cpu_dying NULL -#endif - -extern void sched_show_task(struct task_struct *p); - -#ifdef CONFIG_LOCKUP_DETECTOR -extern void touch_softlockup_watchdog_sched(void); -extern void touch_softlockup_watchdog(void); -extern void touch_softlockup_watchdog_sync(void); -extern void touch_all_softlockup_watchdogs(void); -extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); -extern unsigned int softlockup_panic; -extern unsigned int hardlockup_panic; -void lockup_detector_init(void); -#else -static inline void touch_softlockup_watchdog_sched(void) -{ -} -static inline void touch_softlockup_watchdog(void) -{ -} -static inline void touch_softlockup_watchdog_sync(void) -{ -} -static inline void touch_all_softlockup_watchdogs(void) -{ -} -static inline void lockup_detector_init(void) -{ -} -#endif - -#ifdef CONFIG_DETECT_HUNG_TASK -void reset_hung_task_detector(void); -#else -static inline void reset_hung_task_detector(void) -{ -} -#endif - -/* Attach to any functions which should be ignored in wchan output. */ -#define __sched __attribute__((__section__(".sched.text"))) +#define MAX_SCHEDULE_TIMEOUT LONG_MAX -/* Linker adds these: start and end of __sched functions */ -extern char __sched_text_start[], __sched_text_end[]; - -/* Is this address in the __sched functions? 
*/ -extern int in_sched_functions(unsigned long addr); - -#define MAX_SCHEDULE_TIMEOUT LONG_MAX -extern signed long schedule_timeout(signed long timeout); -extern signed long schedule_timeout_interruptible(signed long timeout); -extern signed long schedule_timeout_killable(signed long timeout); -extern signed long schedule_timeout_uninterruptible(signed long timeout); -extern signed long schedule_timeout_idle(signed long timeout); +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); @@ -440,112 +185,6 @@ extern void io_schedule_finish(int token); extern long io_schedule_timeout(long timeout); extern void io_schedule(void); -void __noreturn do_task_dead(void); - -struct nsproxy; -struct user_namespace; - -#ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); -extern unsigned long -arch_get_unmapped_area(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long); -extern unsigned long -arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} -#endif - -#define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -#define SUID_DUMP_USER 1 /* Dump as user of process */ -#define SUID_DUMP_ROOT 2 /* Dump as root */ - -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - -extern void set_dumpable(struct mm_struct *mm, int value); -/* - * This returns the actual value of the suid_dumpable flag. For things - * that are using this for checking for privilege transitions, it must - * test against SUID_DUMP_USER rather than treating it as a boolean - * value. 
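A typical use of the (now plain long) schedule_timeout() family; note that the task state must be set first:

set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);			/* sleep for about one second */

set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(MAX_SCHEDULE_TIMEOUT);	/* no timeout: wait until woken */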
- */ -static inline int __get_dumpable(unsigned long mm_flags) -{ - return mm_flags & MMF_DUMPABLE_MASK; -} - -static inline int get_dumpable(struct mm_struct *mm) -{ - return __get_dumpable(mm->flags); -} - -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) - -struct sighand_struct { - atomic_t count; - struct k_sigaction action[_NSIG]; - spinlock_t siglock; - wait_queue_head_t signalfd_wqh; -}; - -struct pacct_struct { - int ac_flag; - long ac_exitcode; - unsigned long ac_mem; - u64 ac_utime, ac_stime; - unsigned long ac_minflt, ac_majflt; -}; - -struct cpu_itimer { - u64 expires; - u64 incr; -}; - /** * struct prev_cputime - snapshot of system and user cputime * @utime: time spent in user mode @@ -557,20 +196,12 @@ struct cpu_itimer { */ struct prev_cputime { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; + u64 utime; + u64 stime; + raw_spinlock_t lock; #endif }; -static inline void prev_cputime_init(struct prev_cputime *prev) -{ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - prev->utime = prev->stime = 0; - raw_spin_lock_init(&prev->lock); -#endif -} - /** * struct task_cputime - collected CPU time counts * @utime: time spent in user mode, in nanoseconds @@ -582,380 +213,35 @@ static inline void prev_cputime_init(struct prev_cputime *prev) * these counts together and treat all three of them in parallel. */ struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; -}; - -/* Alternate field names when used to cache expirations. */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime - -/* - * This is the atomic variant of task_cputime, which can be used for - * storing and updating task_cputime statistics without locking.
- */ -struct task_cputime_atomic { - atomic64_t utime; - atomic64_t stime; - atomic64_t sum_exec_runtime; -}; - -#define INIT_CPUTIME_ATOMIC \ - (struct task_cputime_atomic) { \ - .utime = ATOMIC64_INIT(0), \ - .stime = ATOMIC64_INIT(0), \ - .sum_exec_runtime = ATOMIC64_INIT(0), \ - } - -#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/* - * Disable preemption until the scheduler is running -- use an unconditional - * value so that it also works on !PREEMPT_COUNT kernels. - * - * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). - */ -#define INIT_PREEMPT_COUNT PREEMPT_OFFSET - -/* - * Initial preempt_count value; reflects the preempt_count schedule invariant - * which states that during context switches: - * - * preempt_count() == 2*PREEMPT_DISABLE_OFFSET - * - * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. - * Note: See finish_task_switch(). - */ -#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) - -/** - * struct thread_group_cputimer - thread group interval timer counts - * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. - * - * This structure contains the version of task_cputime, above, that is - * used for thread group CPU timer calculations. - */ -struct thread_group_cputimer { - struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; -}; - -#include <linux/rwsem.h> -struct autogroup; - -/* - * NOTE! "signal_struct" does not have its own - * locking, because a shared signal_struct always - * implies a shared sighand_struct, so locking - * sighand_struct is always a proper superset of - * the locking of signal_struct. - */ -struct signal_struct { - atomic_t sigcnt; - atomic_t live; - int nr_threads; - struct list_head thread_head; - - wait_queue_head_t wait_chldexit; /* for wait4() */ - - /* current thread group signal load-balancing target: */ - struct task_struct *curr_target; - - /* shared signal handling: */ - struct sigpending shared_pending; - - /* thread group exit support */ - int group_exit_code; - /* overloaded: - * - notify group_exit_task when ->count is equal to notify_count - * - everyone except group_exit_task is stopped during signal delivery - * of fatal signals, group_exit_task processes the signal. - */ - int notify_count; - struct task_struct *group_exit_task; - - /* thread group stop support, overloads group_exit_code too */ - int group_stop_count; - unsigned int flags; /* see SIGNAL_* flags below */ - - /* - * PR_SET_CHILD_SUBREAPER marks a process, like a service - * manager, to re-parent orphan (double-forking) child processes - * to this process instead of 'init'. The service manager is - * able to receive SIGCHLD signals and is able to investigate - * the process until it calls wait(). All children of this - * process will inherit a flag if they should look for a - * child_subreaper process at exit. 
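For reference, a service manager opts in to the child-subreaper behaviour described above with a single prctl() call (userspace sketch):

#include <sys/prctl.h>

/* Re-parent orphaned descendants to this process instead of init: */
prctl(PR_SET_CHILD_SUBREAPER, 1);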
- */ - unsigned int is_child_subreaper:1; - unsigned int has_child_subreaper:1; - -#ifdef CONFIG_POSIX_TIMERS - - /* POSIX.1b Interval Timers */ - int posix_timer_id; - struct list_head posix_timers; - - /* ITIMER_REAL timer for the process */ - struct hrtimer real_timer; - ktime_t it_real_incr; - - /* - * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use - * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these - * values are defined to 0 and 1 respectively - */ - struct cpu_itimer it[2]; - - /* - * Thread group totals for process CPU timers. - * See thread_group_cputimer(), et al, for details. - */ - struct thread_group_cputimer cputimer; - - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - -#endif - - struct pid *leader_pid; - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; -#endif - - struct pid *tty_old_pgrp; - - /* boolean value for session group leader */ - int leader; - - struct tty_struct *tty; /* NULL if no tty */ - -#ifdef CONFIG_SCHED_AUTOGROUP - struct autogroup *autogroup; -#endif - /* - * Cumulative resource counters for dead threads in the group, - * and for reaped dead child processes forked by this group. - * Live threads maintain their own counters and add to these - * in __exit_signal, except for the group leader. - */ - seqlock_t stats_lock; - u64 utime, stime, cutime, cstime; - u64 gtime; - u64 cgtime; - struct prev_cputime prev_cputime; - unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; - unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; - unsigned long inblock, oublock, cinblock, coublock; - unsigned long maxrss, cmaxrss; - struct task_io_accounting ioac; - - /* - * Cumulative ns of schedule CPU time fo dead threads in the - * group, not including a zombie group leader, (This only differs - * from jiffies_to_ns(utime + stime) if sched_clock uses something - * other than jiffies.) - */ - unsigned long long sum_sched_runtime; - - /* - * We don't bother to synchronize most readers of this at all, - * because there is no reader checking a limit that actually needs - * to get both rlim_cur and rlim_max atomically, and either one - * alone is a single word that can safely be read normally. - * getrlimit/setrlimit use task_lock(current->group_leader) to - * protect this instead of the siglock, because they really - * have no need to disable irqs. - */ - struct rlimit rlim[RLIM_NLIMITS]; - -#ifdef CONFIG_BSD_PROCESS_ACCT - struct pacct_struct pacct; /* per-process accounting information */ -#endif -#ifdef CONFIG_TASKSTATS - struct taskstats *stats; -#endif -#ifdef CONFIG_AUDIT - unsigned audit_tty; - struct tty_audit_buf *tty_audit_buf; -#endif - - /* - * Thread is the potential origin of an oom condition; kill first on - * oom - */ - bool oom_flag_origin; - short oom_score_adj; /* OOM kill score adjustment */ - short oom_score_adj_min; /* OOM kill score adjustment min value. - * Only settable by CAP_SYS_RESOURCE. */ - struct mm_struct *oom_mm; /* recorded mm when the thread group got - * killed by the oom killer */ - - struct mutex cred_guard_mutex; /* guard against foreign influences on - * credential calculations - * (notably. ptrace) */ + u64 utime; + u64 stime; + unsigned long long sum_exec_runtime; }; -/* - * Bits in flags field of signal_struct. 
- */ -#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ -#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ -#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ -#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ -/* - * Pending notifications to parent. - */ -#define SIGNAL_CLD_STOPPED 0x00000010 -#define SIGNAL_CLD_CONTINUED 0x00000020 -#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) - -#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ - -#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ - SIGNAL_STOP_CONTINUED) - -static inline void signal_set_stop_flags(struct signal_struct *sig, - unsigned int flags) -{ - WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); - sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; -} - -/* If true, all threads except ->group_exit_task have pending SIGKILL */ -static inline int signal_group_exit(const struct signal_struct *sig) -{ - return (sig->flags & SIGNAL_GROUP_EXIT) || - (sig->group_exit_task != NULL); -} - -/* - * Some day this will be a full-fledged user tracking system.. - */ -struct user_struct { - atomic_t __count; /* reference count */ - atomic_t processes; /* How many processes does this user have? */ - atomic_t sigpending; /* How many pending signals does this user have? */ -#ifdef CONFIG_INOTIFY_USER - atomic_t inotify_watches; /* How many inotify watches does this user have? */ - atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ -#endif -#ifdef CONFIG_FANOTIFY - atomic_t fanotify_listeners; -#endif -#ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ -#endif -#ifdef CONFIG_POSIX_MQUEUE - /* protected by mq_lock */ - unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ -#endif - unsigned long locked_shm; /* How many pages of mlocked shm ? 
*/ - unsigned long unix_inflight; /* How many files in flight in unix sockets */ - atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ +/* Alternate field names when used on cache expirations: */ +#define virt_exp utime +#define prof_exp stime +#define sched_exp sum_exec_runtime -#ifdef CONFIG_KEYS - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ -#endif - - /* Hash table maintenance information */ - struct hlist_node uidhash_node; - kuid_t uid; - -#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) - atomic_long_t locked_vm; -#endif -}; - -extern int uids_sysfs_init(void); - -extern struct user_struct *find_user(kuid_t); - -extern struct user_struct root_user; -#define INIT_USER (&root_user) - - -struct backing_dev_info; -struct reclaim_state; - -#ifdef CONFIG_SCHED_INFO struct sched_info { - /* cumulative counters */ - unsigned long pcount; /* # of times run on this cpu */ - unsigned long long run_delay; /* time spent waiting on a runqueue */ - - /* timestamps */ - unsigned long long last_arrival,/* when we last ran on a cpu */ - last_queued; /* when we were last queued to run */ -}; -#endif /* CONFIG_SCHED_INFO */ +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ -#ifdef CONFIG_TASK_DELAY_ACCT -struct task_delay_info { - spinlock_t lock; - unsigned int flags; /* Private per-task flags */ + /* # of times we have run on this CPU: */ + unsigned long pcount; - /* For each stat XXX, add following, aligned appropriately - * - * struct timespec XXX_start, XXX_end; - * u64 XXX_delay; - * u32 XXX_count; - * - * Atomicity of updates to XXX_delay, XXX_count protected by - * single lock above (split into XXX_lock if contention is an issue). - */ + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; - /* - * XXX_count is incremented on every XXX operation, the delay - * associated with the operation is added to XXX_delay. - * XXX_delay contains the accumulated delay time in nanoseconds. - */ - u64 blkio_start; /* Shared by blkio, swapin */ - u64 blkio_delay; /* wait for sync block io completion */ - u64 swapin_delay; /* wait for swapin block io completion */ - u32 blkio_count; /* total count of the number of sync block */ - /* io operations performed */ - u32 swapin_count; /* total count of the number of swapin block */ - /* io operations performed */ - - u64 freepages_start; - u64 freepages_delay; /* wait for memory reclaim */ - u32 freepages_count; /* total count of memory reclaim */ -}; -#endif /* CONFIG_TASK_DELAY_ACCT */ + /* Timestamps: */ -static inline int sched_info_on(void) -{ -#ifdef CONFIG_SCHEDSTATS - return 1; -#elif defined(CONFIG_TASK_DELAY_ACCT) - extern int delayacct_on; - return delayacct_on; -#else - return 0; -#endif -} + /* When did we last run on a CPU? */ + unsigned long long last_arrival; -#ifdef CONFIG_SCHEDSTATS -void force_schedstat_enabled(void); -#endif + /* When were we last queued to run? */ + unsigned long long last_queued; -enum cpu_idle_type { - CPU_IDLE, - CPU_NOT_IDLE, - CPU_NEWLY_IDLE, - CPU_MAX_IDLE_TYPES +#endif /* CONFIG_SCHED_INFO */ }; /* @@ -965,290 +251,12 @@ enum cpu_idle_type { * We define a basic fixed point arithmetic range, and then formalize * all these metrics based on that basic range. 
*/ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) - -/* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -/* - * Wake-queues are lists of tasks with a pending wakeup, whose - * callers have already marked the task as woken internally, - * and can thus carry on. A common use case is being able to - * do the wakeups once the corresponding user lock as been - * released. - * - * We hold reference to each task in the list across the wakeup, - * thus guaranteeing that the memory is still valid by the time - * the actual wakeups are performed in wake_up_q(). - * - * One per task suffices, because there's never a need for a task to be - * in two wake queues simultaneously; it is forbidden to abandon a task - * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is - * already in a wake queue, the wakeup will happen soon and the second - * waker can just skip it. - * - * The DEFINE_WAKE_Q macro declares and initializes the list head. - * wake_up_q() does NOT reinitialize the list; it's expected to be - * called near the end of a function. Otherwise, the list can be - * re-initialized for later re-use by wake_q_init(). - * - * Note that this can cause spurious wakeups. schedule() callers - * must ensure the call is done inside a loop, confirming that the - * wakeup condition has in fact occurred. - */ -struct wake_q_node { - struct wake_q_node *next; -}; - -struct wake_q_head { - struct wake_q_node *first; - struct wake_q_node **lastp; -}; - -#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) - -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } - -static inline void wake_q_init(struct wake_q_head *head) -{ - head->first = WAKE_Q_TAIL; - head->lastp = &head->first; -} - -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); - -/* - * sched-domains (multiprocessor balancing) declarations: - */ -#ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
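A hedged sketch of the wake-queue pattern documented above (lock and task are illustrative names):

DEFINE_WAKE_Q(wake_q);

spin_lock(&lock);
wake_q_add(&wake_q, task);	/* mark the waiter; holds a task reference */
spin_unlock(&lock);

wake_up_q(&wake_q);		/* perform the wakeups after dropping the lock */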
*/ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ - -#ifdef CONFIG_SCHED_SMT -static inline int cpu_smt_flags(void) -{ - return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_SCHED_MC -static inline int cpu_core_flags(void) -{ - return SD_SHARE_PKG_RESOURCES; -} -#endif - -#ifdef CONFIG_NUMA -static inline int cpu_numa_flags(void) -{ - return SD_NUMA; -} -#endif - -extern int arch_asym_cpu_priority(int cpu); - -struct sched_domain_attr { - int relax_domain_level; -}; - -#define SD_ATTR_INIT (struct sched_domain_attr) { \ - .relax_domain_level = -1, \ -} - -extern int sched_domain_level_max; - -struct sched_group; - -struct sched_domain_shared { - atomic_t ref; - atomic_t nr_busy_cpus; - int has_idle_cores; -}; - -struct sched_domain { - /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ - struct sched_group *groups; /* the balancing groups of the domain */ - unsigned long min_interval; /* Minimum balance interval ms */ - unsigned long max_interval; /* Maximum balance interval ms */ - unsigned int busy_factor; /* less balancing by factor if busy */ - unsigned int imbalance_pct; /* No balance until over watermark */ - unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; - unsigned int smt_gain; - - int nohz_idle; /* NOHZ IDLE status */ - int flags; /* See SD_* */ - int level; - - /* Runtime fields. */ - unsigned long last_balance; /* init to jiffies. units in jiffies */ - unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ - unsigned int nr_balance_failed; /* initialise to 0 */ - - /* idle_balance() stats */ - u64 max_newidle_lb_cost; - unsigned long next_decay_max_lb_cost; - - u64 avg_scan_cost; /* select_idle_sibling */ - -#ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ - unsigned int lb_count[CPU_MAX_IDLE_TYPES]; - unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; - unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; - unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; - unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; - - /* Active load balancing */ - unsigned int alb_count; - unsigned int alb_failed; - unsigned int alb_pushed; - - /* SD_BALANCE_EXEC stats */ - unsigned int sbe_count; - unsigned int sbe_balanced; - unsigned int sbe_pushed; - - /* SD_BALANCE_FORK stats */ - unsigned int sbf_count; - unsigned int sbf_balanced; - unsigned int sbf_pushed; - - /* try_to_wake_up() stats */ - unsigned int ttwu_wake_remote; - unsigned int ttwu_move_affine; - unsigned int ttwu_move_balance; -#endif -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ - }; - struct sched_domain_shared *shared; - - unsigned int span_weight; - /* - * Span of all CPUs in this domain. - * - * NOTE: this field is variable length. (Allocated dynamically - * by attaching extra space to the end of the structure, - * depending on how many CPUs the kernel has booted up with) - */ - unsigned long span[0]; -}; - -static inline struct cpumask *sched_domain_span(struct sched_domain *sd) -{ - return to_cpumask(sd->span); -} - -extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new); - -/* Allocate an array of sched domains, for partition_sched_domains(). 
*/ -cpumask_var_t *alloc_sched_domains(unsigned int ndoms); -void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); - -bool cpus_share_cache(int this_cpu, int that_cpu); - -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -typedef int (*sched_domain_flags_f)(void); - -#define SDTL_OVERLAP 0x01 - -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_domain_shared **__percpu sds; - struct sched_group **__percpu sg; - struct sched_group_capacity **__percpu sgc; -}; - -struct sched_domain_topology_level { - sched_domain_mask_f mask; - sched_domain_flags_f sd_flags; - int flags; - int numa_level; - struct sd_data data; -#ifdef CONFIG_SCHED_DEBUG - char *name; -#endif -}; - -extern void set_sched_topology(struct sched_domain_topology_level *tl); -extern void wake_up_if_idle(int cpu); - -#ifdef CONFIG_SCHED_DEBUG -# define SD_INIT_NAME(type) .name = #type -#else -# define SD_INIT_NAME(type) -#endif - -#else /* CONFIG_SMP */ - -struct sched_domain_attr; - -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} - -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} - -#endif /* !CONFIG_SMP */ - - -struct io_context; /* See blkdev.h */ - - -#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct *t); -#else -static inline void prefetch_stack(struct task_struct *t) { } -#endif - -struct audit_context; /* See audit.c */ -struct mempolicy; -struct pipe_inode_info; -struct uts_namespace; +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) struct load_weight { - unsigned long weight; - u32 inv_weight; + unsigned long weight; + u32 inv_weight; }; /* @@ -1304,71 +312,73 @@ struct load_weight { * issues. 
*/ struct sched_avg { - u64 last_update_time, load_sum; - u32 util_sum, period_contrib; - unsigned long load_avg, util_avg; + u64 last_update_time; + u64 load_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long util_avg; }; -#ifdef CONFIG_SCHEDSTATS struct sched_statistics { - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - u64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; -}; +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; #endif +}; struct sched_entity { - struct load_weight load; /* for load-balancing */ - struct rb_node run_node; - struct list_head group_node; - unsigned int on_rq; + /* For load-balancing: */ + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; - u64 exec_start; - u64 sum_exec_runtime; - u64 vruntime; - u64 prev_sum_exec_runtime; + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; - u64 nr_migrations; + u64 nr_migrations; -#ifdef CONFIG_SCHEDSTATS - struct sched_statistics statistics; -#endif + struct sched_statistics statistics; #ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; + int depth; + struct sched_entity *parent; /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; + struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; + struct cfs_rq *my_q; #endif #ifdef CONFIG_SMP @@ -1378,49 +388,49 @@ struct sched_entity { * Put into separate cache line so it does not * collide with read-mostly values above. 
*/ - struct sched_avg avg ____cacheline_aligned_in_smp; + struct sched_avg avg ____cacheline_aligned_in_smp; #endif }; struct sched_rt_entity { - struct list_head run_list; - unsigned long timeout; - unsigned long watchdog_stamp; - unsigned int time_slice; - unsigned short on_rq; - unsigned short on_list; - - struct sched_rt_entity *back; + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity *parent; + struct sched_rt_entity *parent; /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; + struct rt_rq *rt_rq; /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; + struct rt_rq *my_q; #endif }; struct sched_dl_entity { - struct rb_node rb_node; + struct rb_node rb_node; /* * Original scheduling parameters. Copied here from sched_attr * during sched_setattr(), they will remain the same until * the next sched_setattr(). */ - u64 dl_runtime; /* maximum runtime for each instance */ - u64 dl_deadline; /* relative deadline of each instance */ - u64 dl_period; /* separation of two instances (period) */ - u64 dl_bw; /* dl_runtime / dl_deadline */ + u64 dl_runtime; /* Maximum runtime for each instance */ + u64 dl_deadline; /* Relative deadline of each instance */ + u64 dl_period; /* Separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_deadline */ /* * Actual scheduling parameters. Initialized with the values above, * they are continously updated during task execution. Note that * the remaining runtime could be < 0 in case we are in overrun. */ - s64 runtime; /* remaining runtime for this instance */ - u64 deadline; /* absolute deadline for this instance */ - unsigned int flags; /* specifying the scheduler behaviour */ + s64 runtime; /* Remaining runtime for this instance */ + u64 deadline; /* Absolute deadline for this instance */ + unsigned int flags; /* Specifying the scheduler behaviour */ /* * Some bool flags: @@ -1433,28 +443,31 @@ struct sched_dl_entity { * outside bandwidth enforcement mechanism (but only until we * exit the critical section); * - * @dl_yielded tells if task gave up the cpu before consuming + * @dl_yielded tells if task gave up the CPU before consuming * all its available runtime during the last job. */ - int dl_throttled, dl_boosted, dl_yielded; + int dl_throttled; + int dl_boosted; + int dl_yielded; /* * Bandwidth enforcement timer. Each -deadline task has its * own bandwidth to be enforced, thus we need one timer per task. */ - struct hrtimer dl_timer; + struct hrtimer dl_timer; }; union rcu_special { struct { - u8 blocked; - u8 need_qs; - u8 exp_need_qs; - u8 pad; /* Otherwise the compiler can store garbage here. */ + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + + /* Otherwise the compiler can store garbage here: */ + u8 pad; } b; /* Bits. */ u32 s; /* Set of bits. */ }; -struct rcu_node; enum perf_event_task_context { perf_invalid_context = -1, @@ -1463,23 +476,8 @@ enum perf_event_task_context { perf_nr_task_contexts, }; -/* Track pages that require TLB flushes */ -struct tlbflush_unmap_batch { - /* - * Each bit set is a CPU that potentially has a TLB entry for one of - * the PFNs being flushed. See set_tlb_ubc_flush_pending(). - */ - struct cpumask cpumask; - - /* True if any bit in cpumask is set */ - bool flush_required; - - /* - * If true then the PTE was dirty when unmapped. 
The entry must be - * flushed before IO is initiated or a stale TLB entry potentially - * allows an update without redirtying the page. - */ - bool writable; +struct wake_q_node { + struct wake_q_node *next; }; struct task_struct { @@ -1488,362 +486,417 @@ struct task_struct { * For reasons of header soup (see current_thread_info()), this * must be the first element of task_struct. */ - struct thread_info thread_info; + struct thread_info thread_info; #endif - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ - unsigned int ptrace; + /* -1 unrunnable, 0 runnable, >0 stopped: */ + volatile long state; + void *stack; + atomic_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; - int on_cpu; + struct llist_node wake_entry; + int on_cpu; #ifdef CONFIG_THREAD_INFO_IN_TASK - unsigned int cpu; /* current CPU */ + /* Current CPU: */ + unsigned int cpu; #endif - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; - int wake_cpu; + int wake_cpu; #endif - int on_rq; + int on_rq; - int prio, static_prio, normal_prio; - unsigned int rt_priority; - const struct sched_class *sched_class; - struct sched_entity se; - struct sched_rt_entity rt; + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; #ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; + struct task_group *sched_task_group; #endif - struct sched_dl_entity dl; + struct sched_dl_entity dl; #ifdef CONFIG_PREEMPT_NOTIFIERS - /* list of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; + /* List of struct preempt_notifier: */ + struct hlist_head preempt_notifiers; #endif #ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; + unsigned int btrace_seq; #endif - unsigned int policy; - int nr_cpus_allowed; - cpumask_t cpus_allowed; + unsigned int policy; + int nr_cpus_allowed; + cpumask_t cpus_allowed; #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; + int rcu_read_lock_nesting; + union rcu_special rcu_read_unlock_special; + struct list_head rcu_node_entry; + struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_PREEMPT_RCU */ + #ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - bool rcu_tasks_holdout; - struct list_head rcu_tasks_holdout_list; - int rcu_tasks_idle_cpu; + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; #endif /* #ifdef CONFIG_TASKS_RCU */ -#ifdef CONFIG_SCHED_INFO - struct sched_info sched_info; -#endif + struct sched_info sched_info; - struct list_head tasks; + struct list_head tasks; #ifdef CONFIG_SMP - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; -#endif - - struct mm_struct *mm, *active_mm; - /* per-thread vma caching */ - u32 vmacache_seqnum; - struct vm_area_struct *vmacache[VMACACHE_SIZE]; -#if defined(SPLIT_RSS_COUNTING) - struct task_rss_stat rss_stat; -#endif -/* task state */ - int exit_state; - int exit_code, exit_signal; - int pdeath_signal; /* The signal sent when the parent dies */ 
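The pdeath_signal field just above is the per-task state behind prctl(PR_SET_PDEATHSIG). A small userspace sketch, assuming only that syscall (the fork/pause scaffolding is illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    int main(void)
    {
    	pid_t pid = fork();

    	if (pid == 0) {
    		/* Child: request SIGTERM when the parent exits; this is
    		 * what gets stored in pdeath_signal above. */
    		if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1)
    			perror("prctl");
    		pause();	/* Terminated by SIGTERM once the parent dies. */
    		_exit(0);
    	}

    	sleep(1);		/* Parent exits here; the child is then signalled. */
    	return 0;
    }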
- unsigned long jobctl; /* JOBCTL_*, siglock protected */ - - /* Used for emulating ABI behavior of previous Linux versions */ - unsigned int personality; - - /* scheduler bits, serialized by scheduler locks */ - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; - unsigned sched_remote_wakeup:1; - unsigned :0; /* force alignment to the next boundary */ - - /* unserialized, strictly 'current' */ - unsigned in_execve:1; /* bit to tell LSMs we're in execve */ - unsigned in_iowait:1; -#if !defined(TIF_RESTORE_SIGMASK) - unsigned restore_sigmask:1; + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; +#endif + + struct mm_struct *mm; + struct mm_struct *active_mm; + + /* Per-thread vma caching: */ + struct vmacache vmacache; + +#ifdef SPLIT_RSS_COUNTING + struct task_rss_stat rss_stat; +#endif + int exit_state; + int exit_code; + int exit_signal; + /* The signal sent when the parent dies: */ + int pdeath_signal; + /* JOBCTL_*, siglock protected: */ + unsigned long jobctl; + + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; + + /* Scheduler bits, serialized by scheduler locks: */ + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + /* Force alignment to the next boundary: */ + unsigned :0; + + /* Unserialized, strictly 'current' */ + + /* Bit to tell LSMs we're in execve(): */ + unsigned in_execve:1; + unsigned in_iowait:1; +#ifndef TIF_RESTORE_SIGMASK + unsigned restore_sigmask:1; #endif #ifdef CONFIG_MEMCG - unsigned memcg_may_oom:1; + unsigned memcg_may_oom:1; #ifndef CONFIG_SLOB - unsigned memcg_kmem_skip_account:1; + unsigned memcg_kmem_skip_account:1; #endif #endif #ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; + unsigned brk_randomized:1; #endif - unsigned long atomic_flags; /* Flags needing atomic access. */ + unsigned long atomic_flags; /* Flags requiring atomic access. */ - struct restart_block restart_block; + struct restart_block restart_block; - pid_t pid; - pid_t tgid; + pid_t pid; + pid_t tgid; #ifdef CONFIG_CC_STACKPROTECTOR - /* Canary value for the -fstack-protector gcc feature */ - unsigned long stack_canary; + /* Canary value for the -fstack-protector GCC feature: */ + unsigned long stack_canary; #endif /* - * pointers to (original) parent process, youngest child, younger sibling, + * Pointers to the (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - struct task_struct __rcu *real_parent; /* real parent process */ - struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ + + /* Real parent process: */ + struct task_struct __rcu *real_parent; + + /* Recipient of SIGCHLD, wait4() reports: */ + struct task_struct __rcu *parent; + /* - * children/sibling forms the list of my natural children + * Children/sibling form the list of natural children: */ - struct list_head children; /* list of my children */ - struct list_head sibling; /* linkage in my parent's children list */ - struct task_struct *group_leader; /* threadgroup leader */ + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; /* - * ptraced is the list of tasks this task is using ptrace on. + * 'ptraced' is the list of tasks this task is using ptrace() on. + * * This includes both natural children and PTRACE_ATTACH targets. 
- * p->ptrace_entry is p's link on the p->parent->ptraced list. + * 'ptrace_entry' is this task's link on the p->parent->ptraced list. */ - struct list_head ptraced; - struct list_head ptrace_entry; + struct list_head ptraced; + struct list_head ptrace_entry; /* PID/PID hash table linkage. */ - struct pid_link pids[PIDTYPE_MAX]; - struct list_head thread_group; - struct list_head thread_node; + struct pid_link pids[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; + + struct completion *vfork_done; - struct completion *vfork_done; /* for vfork() */ - int __user *set_child_tid; /* CLONE_CHILD_SETTID */ - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + /* CLONE_CHILD_SETTID: */ + int __user *set_child_tid; - u64 utime, stime; + /* CLONE_CHILD_CLEARTID: */ + int __user *clear_child_tid; + + u64 utime; + u64 stime; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled, stimescaled; + u64 utimescaled; + u64 stimescaled; #endif - u64 gtime; - struct prev_cputime prev_cputime; + u64 gtime; + struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_t vtime_seqcount; - unsigned long long vtime_snap; + seqcount_t vtime_seqcount; + unsigned long long vtime_snap; enum { - /* Task is sleeping or running in a CPU with VTIME inactive */ + /* Task is sleeping or running in a CPU with VTIME inactive: */ VTIME_INACTIVE = 0, - /* Task runs in userspace in a CPU with VTIME active */ + /* Task runs in userspace in a CPU with VTIME active: */ VTIME_USER, - /* Task runs in kernelspace in a CPU with VTIME active */ + /* Task runs in kernelspace in a CPU with VTIME active: */ VTIME_SYS, } vtime_snap_whence; #endif #ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; + atomic_t tick_dep_mask; #endif - unsigned long nvcsw, nivcsw; /* context switch counts */ - u64 start_time; /* monotonic time in nsec */ - u64 real_start_time; /* boot based time in nsec */ -/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ - unsigned long min_flt, maj_flt; + /* Context switch counts: */ + unsigned long nvcsw; + unsigned long nivcsw; + + /* Monotonic time in nsecs: */ + u64 start_time; + + /* Boot based time in nsecs: */ + u64 real_start_time; + + /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ + unsigned long min_flt; + unsigned long maj_flt; #ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif - -/* process credentials */ - const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ - const struct cred __rcu *real_cred; /* objective and real subjective task - * credentials (COW) */ - const struct cred __rcu *cred; /* effective (overridable) subjective task - * credentials (COW) */ - char comm[TASK_COMM_LEN]; /* executable name excluding path - - access with [gs]et_task_comm (which lock - it with task_lock()) - - initialized normally by setup_new_exec */ -/* file system info */ - struct nameidata *nameidata; + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; +#endif + + /* Process credentials: */ + + /* Tracer's credentials at attach: */ + const struct cred __rcu *ptracer_cred; + + /* Objective and real subjective task credentials (COW): */ + const struct cred __rcu *real_cred; + + /* Effective (overridable) subjective task credentials (COW): */ + const struct cred __rcu *cred; + + /* + * executable name, excluding path. 
+	 *
+	 * - normally initialized by setup_new_exec()
+	 * - access it with [gs]et_task_comm()
+	 * - lock it with task_lock()
+	 */
+	char				comm[TASK_COMM_LEN];
+
+	struct nameidata		*nameidata;
+
 #ifdef CONFIG_SYSVIPC
-/* ipc stuff */
-	struct sysv_sem sysvsem;
-	struct sysv_shm sysvshm;
+	struct sysv_sem			sysvsem;
+	struct sysv_shm			sysvshm;
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
-/* hung task detection */
-	unsigned long last_switch_count;
-#endif
-/* filesystem information */
-	struct fs_struct *fs;
-/* open file information */
-	struct files_struct *files;
-/* namespaces */
-	struct nsproxy *nsproxy;
-/* signal handlers */
-	struct signal_struct *signal;
-	struct sighand_struct *sighand;
-
-	sigset_t blocked, real_blocked;
-	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
-	struct sigpending pending;
-
-	unsigned long sas_ss_sp;
-	size_t sas_ss_size;
-	unsigned sas_ss_flags;
-
-	struct callback_head *task_works;
-
-	struct audit_context *audit_context;
+	unsigned long			last_switch_count;
+#endif
+	/* Filesystem information: */
+	struct fs_struct		*fs;
+
+	/* Open file information: */
+	struct files_struct		*files;
+
+	/* Namespaces: */
+	struct nsproxy			*nsproxy;
+
+	/* Signal handlers: */
+	struct signal_struct		*signal;
+	struct sighand_struct		*sighand;
+	sigset_t			blocked;
+	sigset_t			real_blocked;
+	/* Restored if set_restore_sigmask() was used: */
+	sigset_t			saved_sigmask;
+	struct sigpending		pending;
+	unsigned long			sas_ss_sp;
+	size_t				sas_ss_size;
+	unsigned int			sas_ss_flags;
+
+	struct callback_head		*task_works;
+
+	struct audit_context		*audit_context;
 #ifdef CONFIG_AUDITSYSCALL
-	kuid_t loginuid;
-	unsigned int sessionid;
+	kuid_t				loginuid;
+	unsigned int			sessionid;
 #endif
-	struct seccomp seccomp;
+	struct seccomp			seccomp;
+
+	/* Thread group tracking: */
+	u32				parent_exec_id;
+	u32				self_exec_id;
-/* Thread group tracking */
-	u32 parent_exec_id;
-	u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
- * mempolicy */
-	spinlock_t alloc_lock;
+	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
+	spinlock_t			alloc_lock;

 	/* Protection of the PI data structures: */
-	raw_spinlock_t pi_lock;
+	raw_spinlock_t			pi_lock;

-	struct wake_q_node wake_q;
+	struct wake_q_node		wake_q;

 #ifdef CONFIG_RT_MUTEXES
-	/* PI waiters blocked on a rt_mutex held by this task */
-	struct rb_root pi_waiters;
-	struct rb_node *pi_waiters_leftmost;
-	/* Deadlock detection and priority inheritance handling */
-	struct rt_mutex_waiter *pi_blocked_on;
+	/* PI waiters blocked on a rt_mutex held by this task: */
+	struct rb_root			pi_waiters;
+	struct rb_node			*pi_waiters_leftmost;
+	/* Deadlock detection and priority inheritance handling: */
+	struct rt_mutex_waiter		*pi_blocked_on;
 #endif

 #ifdef CONFIG_DEBUG_MUTEXES
-	/* mutex deadlock detection */
-	struct mutex_waiter *blocked_on;
+	/* Mutex deadlock detection: */
+	struct mutex_waiter		*blocked_on;
 #endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned int irq_events;
-	unsigned long hardirq_enable_ip;
-	unsigned long hardirq_disable_ip;
-	unsigned int hardirq_enable_event;
-	unsigned int hardirq_disable_event;
-	int hardirqs_enabled;
-	int hardirq_context;
-	unsigned long softirq_disable_ip;
-	unsigned long softirq_enable_ip;
-	unsigned int softirq_disable_event;
-	unsigned int softirq_enable_event;
-	int softirqs_enabled;
-	int softirq_context;
+	unsigned int			irq_events;
+	unsigned long			hardirq_enable_ip;
+	unsigned long			hardirq_disable_ip;
+	unsigned int
		hardirq_enable_event;
+	unsigned int			hardirq_disable_event;
+	int				hardirqs_enabled;
+	int				hardirq_context;
+	unsigned long			softirq_disable_ip;
+	unsigned long			softirq_enable_ip;
+	unsigned int			softirq_disable_event;
+	unsigned int			softirq_enable_event;
+	int				softirqs_enabled;
+	int				softirq_context;
 #endif
+
 #ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 48UL
-	u64 curr_chain_key;
-	int lockdep_depth;
-	unsigned int lockdep_recursion;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
-	gfp_t lockdep_reclaim_gfp;
+# define MAX_LOCK_DEPTH			48UL
+	u64				curr_chain_key;
+	int				lockdep_depth;
+	unsigned int			lockdep_recursion;
+	struct held_lock		held_locks[MAX_LOCK_DEPTH];
+	gfp_t				lockdep_reclaim_gfp;
 #endif
+
 #ifdef CONFIG_UBSAN
-	unsigned int in_ubsan;
+	unsigned int			in_ubsan;
 #endif
-/* journalling filesystem info */
-	void *journal_info;
+	/* Journalling filesystem info: */
+	void				*journal_info;

-/* stacked block device info */
-	struct bio_list *bio_list;
+	/* Stacked block device info: */
+	struct bio_list			*bio_list;

 #ifdef CONFIG_BLOCK
-/* stack plugging */
-	struct blk_plug *plug;
+	/* Stack plugging: */
+	struct blk_plug			*plug;
 #endif

-/* VM state */
-	struct reclaim_state *reclaim_state;
+	/* VM state: */
+	struct reclaim_state		*reclaim_state;
+
+	struct backing_dev_info		*backing_dev_info;

-	struct backing_dev_info *backing_dev_info;
+	struct io_context		*io_context;

-	struct io_context *io_context;
+	/* Ptrace state: */
+	unsigned long			ptrace_message;
+	siginfo_t			*last_siginfo;

-	unsigned long ptrace_message;
-	siginfo_t *last_siginfo; /* For ptrace use.  */
-	struct task_io_accounting ioac;
-#if defined(CONFIG_TASK_XACCT)
-	u64 acct_rss_mem1;	/* accumulated rss usage */
-	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	u64 acct_timexpd;	/* stime + utime since last update */
+	struct task_io_accounting	ioac;
+#ifdef CONFIG_TASK_XACCT
+	/* Accumulated RSS usage: */
+	u64				acct_rss_mem1;
+	/* Accumulated virtual memory usage: */
+	u64				acct_vm_mem1;
+	/* stime + utime since last update: */
+	u64				acct_timexpd;
 #endif
 #ifdef CONFIG_CPUSETS
-	nodemask_t mems_allowed;	/* Protected by alloc_lock */
-	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
-	int cpuset_mem_spread_rotor;
-	int cpuset_slab_spread_rotor;
+	/* Protected by ->alloc_lock: */
+	nodemask_t			mems_allowed;
+	/* Sequence number to catch updates: */
+	seqcount_t			mems_allowed_seq;
+	int				cpuset_mem_spread_rotor;
+	int				cpuset_slab_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
-	/* Control Group info protected by css_set_lock */
-	struct css_set __rcu *cgroups;
-	/* cg_list protected by css_set_lock and tsk->alloc_lock */
-	struct list_head cg_list;
+	/* Control Group info protected by css_set_lock: */
+	struct css_set __rcu		*cgroups;
+	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
+	struct list_head		cg_list;
 #endif
 #ifdef CONFIG_INTEL_RDT_A
-	int closid;
+	int				closid;
 #endif
 #ifdef CONFIG_FUTEX
-	struct robust_list_head __user *robust_list;
+	struct robust_list_head __user	*robust_list;
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
-	struct list_head pi_state_list;
-	struct futex_pi_state *pi_state_cache;
+	struct list_head		pi_state_list;
+	struct futex_pi_state		*pi_state_cache;
 #endif
 #ifdef CONFIG_PERF_EVENTS
-	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
-	struct mutex perf_event_mutex;
-	struct list_head perf_event_list;
+	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
+	struct mutex			perf_event_mutex;
+	struct list_head
perf_event_list; #endif #ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; + unsigned long preempt_disable_ip; #endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; /* Protected by alloc_lock */ - short il_next; - short pref_node_fork; + /* Protected by alloc_lock: */ + struct mempolicy *mempolicy; + short il_next; + short pref_node_fork; #endif #ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - u64 node_stamp; /* migration stamp */ - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; - - struct list_head numa_entry; - struct numa_group *numa_group; + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + /* Migration stamp: */ + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; + + struct list_head numa_entry; + struct numa_group *numa_group; /* * numa_faults is an array split into four regions: @@ -1859,8 +912,8 @@ struct task_struct { * during the current scan window. When the scan completes, the counts * in faults_memory and faults_cpu decay and these values are copied. */ - unsigned long *numa_faults; - unsigned long total_numa_faults; + unsigned long *numa_faults; + unsigned long total_numa_faults; /* * numa_faults_locality tracks if faults recorded during the last @@ -1868,208 +921,133 @@ struct task_struct { * period is adapted based on the locality of the faults with different * weights depending on whether they were shared or private faults */ - unsigned long numa_faults_locality[3]; + unsigned long numa_faults_locality[3]; - unsigned long numa_pages_migrated; + unsigned long numa_pages_migrated; #endif /* CONFIG_NUMA_BALANCING */ -#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH - struct tlbflush_unmap_batch tlb_ubc; -#endif + struct tlbflush_unmap_batch tlb_ubc; - struct rcu_head rcu; + struct rcu_head rcu; - /* - * cache last used pipe for splice - */ - struct pipe_inode_info *splice_pipe; + /* Cache last used pipe for splice(): */ + struct pipe_inode_info *splice_pipe; - struct page_frag task_frag; + struct page_frag task_frag; -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; +#ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info *delays; #endif + #ifdef CONFIG_FAULT_INJECTION - int make_it_fail; + int make_it_fail; #endif /* - * when (nr_dirtied >= nr_dirtied_pause), it's time to call - * balance_dirty_pages() for some dirty throttling pause + * When (nr_dirtied >= nr_dirtied_pause), it's time to call + * balance_dirty_pages() for a dirty throttling pause: */ - int nr_dirtied; - int nr_dirtied_pause; - unsigned long dirty_paused_when; /* start of a write-and-pause period */ + int nr_dirtied; + int nr_dirtied_pause; + /* Start of a write-and-pause period: */ + unsigned long dirty_paused_when; #ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; + int latency_record_count; + struct latency_record latency_record[LT_SAVECOUNT]; #endif /* - * time slack values; these are used to round up poll() and + * Time slack values; these are used to round up poll() and * select() etc timeout values. These are in nanoseconds. 
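(timer_slack_ns above is the per-task value exposed through prctl(PR_GET_TIMERSLACK)/prctl(PR_SET_TIMERSLACK); a short userspace sketch, where the 1 us figure is arbitrary:)

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
    	/* PR_GET_TIMERSLACK returns the current slack in nanoseconds: */
    	printf("current timer slack: %ld ns\n", (long)prctl(PR_GET_TIMERSLACK));

    	/* Request a tighter 1us slack; poll()/select()/nanosleep() wakeups
    	 * may then be rounded up by at most roughly that amount: */
    	if (prctl(PR_SET_TIMERSLACK, 1000L) == -1)
    		perror("prctl");

    	printf("new timer slack: %ld ns\n", (long)prctl(PR_GET_TIMERSLACK));
    	return 0;
    }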
*/ - u64 timer_slack_ns; - u64 default_timer_slack_ns; + u64 timer_slack_ns; + u64 default_timer_slack_ns; #ifdef CONFIG_KASAN - unsigned int kasan_depth; + unsigned int kasan_depth; #endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack */ - int curr_ret_stack; - /* Stack of return addresses for return function tracing */ - struct ftrace_ret_stack *ret_stack; - /* time stamp for last schedule */ - unsigned long long ftrace_timestamp; + /* Index of current stored address in ret_stack: */ + int curr_ret_stack; + + /* Stack of return addresses for return function tracing: */ + struct ftrace_ret_stack *ret_stack; + + /* Timestamp for last schedule: */ + unsigned long long ftrace_timestamp; + /* * Number of functions that haven't been traced - * because of depth overrun. + * because of depth overrun: */ - atomic_t trace_overrun; - /* Pause for the tracing */ - atomic_t tracing_graph_pause; + atomic_t trace_overrun; + + /* Pause tracing: */ + atomic_t tracing_graph_pause; #endif + #ifdef CONFIG_TRACING - /* state flags for use by tracers */ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; + /* State flags for use by tracers: */ + unsigned long trace; + + /* Bitmask and counter of trace recursion: */ + unsigned long trace_recursion; #endif /* CONFIG_TRACING */ + #ifdef CONFIG_KCOV - /* Coverage collection mode enabled for this task (0 if disabled). */ - enum kcov_mode kcov_mode; - /* Size of the kcov_area. */ - unsigned kcov_size; - /* Buffer for coverage collection. */ - void *kcov_area; - /* kcov desciptor wired with this task or NULL. */ - struct kcov *kcov; + /* Coverage collection mode enabled for this task (0 if disabled): */ + enum kcov_mode kcov_mode; + + /* Size of the kcov_area: */ + unsigned int kcov_size; + + /* Buffer for coverage collection: */ + void *kcov_area; + + /* KCOV descriptor wired with this task or NULL: */ + struct kcov *kcov; #endif + #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; - /* number of pages to reclaim on returning to userland */ - unsigned int memcg_nr_pages_over_high; + /* Number of pages to reclaim on returning to userland: */ + unsigned int memcg_nr_pages_over_high; #endif + #ifdef CONFIG_UPROBES - struct uprobe_task *utask; + struct uprobe_task *utask; #endif #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; + unsigned int sequential_io; + unsigned int sequential_io_avg; #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; + unsigned long task_state_change; #endif - int pagefault_disabled; + int pagefault_disabled; #ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; + struct task_struct *oom_reaper_list; #endif #ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; + struct vm_struct *stack_vm_area; #endif #ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference. */ - atomic_t stack_refcount; + /* A live task holds one reference: */ + atomic_t stack_refcount; #endif -/* CPU-specific state of this task */ - struct thread_struct thread; -/* - * WARNING: on x86, 'thread_struct' contains a variable-sized - * structure. It *MUST* be at the end of 'task_struct'. - * - * Do not put anything below here! 
- */ -}; - -#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -extern int arch_task_struct_size __read_mostly; -#else -# define arch_task_struct_size (sizeof(struct task_struct)) -#endif - -#ifdef CONFIG_VMAP_STACK -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return t->stack_vm_area; -} -#else -static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) -{ - return NULL; -} -#endif - -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -static inline int tsk_nr_cpus_allowed(struct task_struct *p) -{ - return p->nr_cpus_allowed; -} - -#define TNF_MIGRATED 0x01 -#define TNF_NO_GROUP 0x02 -#define TNF_SHARED 0x04 -#define TNF_FAULT_LOCAL 0x08 -#define TNF_MIGRATE_FAIL 0x10 - -static inline bool in_vfork(struct task_struct *tsk) -{ - bool ret; + /* CPU-specific state of this task: */ + struct thread_struct thread; /* - * need RCU to access ->real_parent if CLONE_VM was used along with - * CLONE_PARENT. - * - * We check real_parent->mm == tsk->mm because CLONE_VFORK does not - * imply CLONE_VM - * - * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus - * ->real_parent is not necessarily the task doing vfork(), so in - * theory we can't rely on task_lock() if we want to dereference it. + * WARNING: on x86, 'thread_struct' contains a variable-sized + * structure. It *MUST* be at the end of 'task_struct'. * - * And in this case we can't trust the real_parent->mm == tsk->mm - * check, it can be false negative. But we do not care, if init or - * another oom-unkillable task does this it should blame itself. + * Do not put anything below here! */ - rcu_read_lock(); - ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; - rcu_read_unlock(); - - return ret; -} - -#ifdef CONFIG_NUMA_BALANCING -extern void task_numa_fault(int last_node, int node, int pages, int flags); -extern pid_t task_numa_group_id(struct task_struct *p); -extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); -#else -static inline void task_numa_fault(int last_node, int node, int pages, - int flags) -{ -} -static inline pid_t task_numa_group_id(struct task_struct *p) -{ - return 0; -} -static inline void set_numabalancing_state(bool enabled) -{ -} -static inline void task_numa_free(struct task_struct *p) -{ -} -static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) -{ - return true; -} -#endif +}; static inline struct pid *task_pid(struct task_struct *task) { @@ -2082,7 +1060,7 @@ static inline struct pid *task_tgid(struct task_struct *task) } /* - * Without tasklist or rcu lock it is not safe to dereference + * Without tasklist or RCU lock it is not safe to dereference * the result of task_pgrp/task_session even if task == current, * we can race with another thread doing sys_setsid/sys_setpgid. 
*/ @@ -2096,8 +1074,6 @@ static inline struct pid *task_session(struct task_struct *task) return task->group_leader->pids[PIDTYPE_SID].pid; } -struct pid_namespace; - /* * the helpers to get the task's different pids as they are seen * from various namespaces @@ -2111,16 +1087,14 @@ struct pid_namespace; * * see also pid_nr() etc in include/linux/pid.h */ -pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, - struct pid_namespace *ns); +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); static inline pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; } -static inline pid_t task_pid_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); } @@ -2136,15 +1110,28 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); +extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline pid_t task_tgid_vnr(struct task_struct *tsk) { return pid_vnr(task_tgid(tsk)); } +/** + * pid_alive - check that a task structure is not stale + * @p: Task structure to be checked. + * + * Test if a process is not yet dead (at most zombie state) + * If pid_alive fails, then pointers within the task structure + * can be stale and must not be dereferenced. + * + * Return: 1 if the process is alive. 0 otherwise. + */ +static inline int pid_alive(const struct task_struct *p) +{ + return p->pids[PIDTYPE_PID].pid != NULL; +} -static inline int pid_alive(const struct task_struct *p); static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) { pid_t pid = 0; @@ -2162,8 +1149,7 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk) return task_ppid_nr_ns(tsk, &init_pid_ns); } -static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); } @@ -2174,8 +1160,7 @@ static inline pid_t task_pgrp_vnr(struct task_struct *tsk) } -static inline pid_t task_session_nr_ns(struct task_struct *tsk, - struct pid_namespace *ns) +static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); } @@ -2185,28 +1170,13 @@ static inline pid_t task_session_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } -/* obsolete, do not use */ +/* Obsolete, do not use: */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { return task_pgrp_nr_ns(tsk, &init_pid_ns); } /** - * pid_alive - check that a task structure is not stale - * @p: Task structure to be checked. - * - * Test if a process is not yet dead (at most zombie state) - * If pid_alive fails, then pointers within the task structure - * can be stale and must not be dereferenced. - * - * Return: 1 if the process is alive. 0 otherwise. - */ -static inline int pid_alive(const struct task_struct *p) -{ - return p->pids[PIDTYPE_PID].pid != NULL; -} - -/** * is_global_init - check if a task structure is init. Since init * is free to have sub-threads we need to check tgid. * @tsk: Task structure to be checked. 
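A hedged kernel-module sketch of the accessors documented above (the module scaffolding and names are hypothetical; task_pid_nr()/task_pid_vnr() and the tgid variants are the real helpers, and the two ids only differ when run inside a pid namespace):

    #include <linux/module.h>
    #include <linux/sched.h>

    static int __init pidns_demo_init(void)
    {
    	/* Global id (as seen from the init namespace) vs. the id seen
    	 * from current's own pid namespace: */
    	pr_info("pid %d (virtual %d), tgid %d (virtual %d)\n",
    		task_pid_nr(current), task_pid_vnr(current),
    		task_tgid_nr(current), task_tgid_vnr(current));
    	return 0;
    }

    static void __exit pidns_demo_exit(void)
    {
    }

    module_init(pidns_demo_init);
    module_exit(pidns_demo_exit);
    MODULE_LICENSE("GPL");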
@@ -2222,89 +1192,37 @@ static inline int is_global_init(struct task_struct *tsk) extern struct pid *cad_pid; -extern void free_task(struct task_struct *tsk); -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -extern void __put_task_struct(struct task_struct *t); - -static inline void put_task_struct(struct task_struct *t) -{ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); -} - -struct task_struct *task_rcu_dereference(struct task_struct **ptask); -struct task_struct *try_get_task_struct(struct task_struct **ptask); - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime); -extern u64 task_gtime(struct task_struct *t); -#else -static inline void task_cputime(struct task_struct *t, - u64 *utime, u64 *stime) -{ - *utime = t->utime; - *stime = t->stime; -} - -static inline u64 task_gtime(struct task_struct *t) -{ - return t->gtime; -} -#endif - -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - *utimescaled = t->utimescaled; - *stimescaled = t->stimescaled; -} -#else -static inline void task_cputime_scaled(struct task_struct *t, - u64 *utimescaled, - u64 *stimescaled) -{ - task_cputime(t, utimescaled, stimescaled); -} -#endif - -extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); -extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); - /* * Per process flags */ -#define PF_IDLE 0x00000002 /* I am an IDLE thread */ -#define PF_EXITING 0x00000004 /* getting shut down */ -#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ -#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ -#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ -#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ -#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ -#define PF_DUMPCORE 0x00000200 /* dumped core */ -#define PF_SIGNALED 0x00000400 /* killed by a signal */ -#define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ -#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ -#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -#define PF_FROZEN 0x00010000 /* frozen for system suspend */ -#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ -#define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ -#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ -#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_EXITING 
0x00000004 /* Getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ +#define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ +#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ +#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ +#define PF_DUMPCORE 0x00000200 /* Dumped core */ +#define PF_SIGNALED 0x00000400 /* Killed by a signal */ +#define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ +#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ +#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ +#define PF_FROZEN 0x00010000 /* Frozen for system suspend */ +#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */ +#define PF_KSWAPD 0x00040000 /* I am kswapd */ +#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ +#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ +#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ +#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -2317,55 +1235,38 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *s * child is not running and in turn not changing child->flags * at the same time the parent does it. */ -#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) -#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) -#define clear_used_math() clear_stopped_child_used_math(current) -#define set_used_math() set_stopped_child_used_math(current) +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) +#define clear_used_math() clear_stopped_child_used_math(current) +#define set_used_math() set_stopped_child_used_math(current) + #define conditional_stopped_child_used_math(condition, child) \ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) -#define conditional_used_math(condition) \ - conditional_stopped_child_used_math(condition, current) -#define copy_to_stopped_child_used_math(child) \ - do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ -#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) -#define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags - * __GFP_FS is also cleared as it implies __GFP_IO. 
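(The memalloc_noio_save()/memalloc_noio_restore() helpers defined just below are the intended way to enter such a no-IO section; a minimal kernel-side sketch of the save/restore pattern, with a hypothetical caller:)

    #include <linux/sched.h>
    #include <linux/slab.h>

    static void *alloc_on_io_path(size_t size)
    {
    	unsigned int noio_flags;
    	void *p;

    	noio_flags = memalloc_noio_save();	/* Sets PF_MEMALLOC_NOIO. */
    	/* The allocator strips __GFP_IO/__GFP_FS via memalloc_noio_flags(): */
    	p = kmalloc(size, GFP_KERNEL);
    	memalloc_noio_restore(noio_flags);	/* Restores the previous state. */

    	return p;
    }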
- */ -static inline gfp_t memalloc_noio_flags(gfp_t flags) -{ - if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~(__GFP_IO | __GFP_FS); - return flags; -} +#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) -static inline unsigned int memalloc_noio_save(void) -{ - unsigned int flags = current->flags & PF_MEMALLOC_NOIO; - current->flags |= PF_MEMALLOC_NOIO; - return flags; -} +#define copy_to_stopped_child_used_math(child) \ + do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) -static inline void memalloc_noio_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; -} +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) +#define used_math() tsk_used_math(current) /* Per-process atomic flags. */ -#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ -#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ -#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ -#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ +#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ +#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ +#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ +#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ { return test_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_SET(name, func) \ static inline void task_set_##func(struct task_struct *p) \ { set_bit(PFA_##name, &p->atomic_flags); } + #define TASK_PFA_CLEAR(name, func) \ static inline void task_clear_##func(struct task_struct *p) \ { clear_bit(PFA_##name, &p->atomic_flags); } @@ -2384,75 +1285,23 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) TASK_PFA_TEST(LMK_WAITING, lmk_waiting) TASK_PFA_SET(LMK_WAITING, lmk_waiting) -/* - * task->jobctl flags - */ -#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ - -#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ -#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ -#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ -#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ -#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ -#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ -#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ - -#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) -#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) -#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) -#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) -#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) -#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) -#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) - -#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) - -extern bool task_set_jobctl_pending(struct task_struct *task, - unsigned long mask); -extern void task_clear_jobctl_trapping(struct task_struct *task); -extern void task_clear_jobctl_pending(struct task_struct *task, - unsigned long mask); - -static inline void rcu_copy_process(struct task_struct *p) -{ -#ifdef CONFIG_PREEMPT_RCU - p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special.s = 0; - p->rcu_blocked_node = NULL; - 
INIT_LIST_HEAD(&p->rcu_node_entry); -#endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TASKS_RCU - p->rcu_tasks_holdout = false; - INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); - p->rcu_tasks_idle_cpu = -1; -#endif /* #ifdef CONFIG_TASKS_RCU */ -} - -static inline void tsk_restore_flags(struct task_struct *task, - unsigned long orig_flags, unsigned long flags) +static inline void +tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { task->flags &= ~flags; task->flags |= orig_flags & flags; } -extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, - const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed); +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP -extern void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask); - -extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); #else -static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) +static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { } -static inline int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask) +static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) { if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; @@ -2460,165 +1309,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, } #endif -#ifdef CONFIG_NO_HZ_COMMON -void calc_load_enter_idle(void); -void calc_load_exit_idle(void); -#else -static inline void calc_load_enter_idle(void) { } -static inline void calc_load_exit_idle(void) { } -#endif /* CONFIG_NO_HZ_COMMON */ - #ifndef cpu_relax_yield #define cpu_relax_yield() cpu_relax() #endif -/* - * Do not use outside of architecture code which knows its limitations. - * - * sched_clock() has no promise of monotonicity or bounded drift between - * CPUs, use (which you should not) requires disabling IRQs. - * - * Please use one of the three interfaces below. 
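(Of the interfaces the comment points at, local_clock() is the common choice; a minimal kernel-side sketch of timing a section with it, where the measured work is a stand-in:)

    #include <linux/kernel.h>
    #include <linux/sched.h>

    static void time_section(void)
    {
    	u64 t0, t1;

    	t0 = local_clock();	/* Monotonic on this CPU, unlike raw sched_clock(). */
    	cpu_relax();		/* Stand-in for the work being measured. */
    	t1 = local_clock();

    	pr_info("section took %llu ns\n", (unsigned long long)(t1 - t0));
    }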
- */ -extern unsigned long long notrace sched_clock(void); -/* - * See the comment in kernel/sched/clock.c - */ -extern u64 running_clock(void); -extern u64 sched_clock_cpu(int cpu); - - -extern void sched_clock_init(void); - -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init_late(void) -{ -} - -static inline void sched_clock_tick(void) -{ -} - -static inline void clear_sched_clock_stable(void) -{ -} - -static inline void sched_clock_idle_sleep_event(void) -{ -} - -static inline void sched_clock_idle_wakeup_event(u64 delta_ns) -{ -} - -static inline u64 cpu_clock(int cpu) -{ - return sched_clock(); -} - -static inline u64 local_clock(void) -{ - return sched_clock(); -} -#else -extern void sched_clock_init_late(void); -/* - * Architectures can set this to 1 if they have specified - * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, - * but then during bootup it turns out that sched_clock() - * is reliable after all: - */ -extern int sched_clock_stable(void); -extern void clear_sched_clock_stable(void); - -extern void sched_clock_tick(void); -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -/* - * As outlined in clock.c, provides a fast, high resolution, nanosecond - * time source that is monotonic per cpu argument and has bounded drift - * between cpus. - * - * ######################### BIG FAT WARNING ########################## - * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # - * # go backwards !! # - * #################################################################### - */ -static inline u64 cpu_clock(int cpu) -{ - return sched_clock_cpu(cpu); -} - -static inline u64 local_clock(void) -{ - return sched_clock_cpu(raw_smp_processor_id()); -} -#endif - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -/* - * An i/f to runtime opt-in for irq time accounting based off of sched_clock. - * The reason for this explicit opt-in is not to have perf penalty with - * slow sched_clocks. 
- */ -extern void enable_sched_clock_irqtime(void); -extern void disable_sched_clock_irqtime(void); -#else -static inline void enable_sched_clock_irqtime(void) {} -static inline void disable_sched_clock_irqtime(void) {} -#endif - -extern unsigned long long -task_sched_runtime(struct task_struct *task); - -/* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP -extern void sched_exec(void); -#else -#define sched_exec() {} -#endif - -extern void sched_clock_idle_sleep_event(void); -extern void sched_clock_idle_wakeup_event(u64 delta_ns); - -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else -static inline void idle_task_exit(void) {} -#endif - -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) -extern void wake_up_nohz_cpu(int cpu); -#else -static inline void wake_up_nohz_cpu(int cpu) { } -#endif - -#ifdef CONFIG_NO_HZ_FULL -extern u64 scheduler_tick_max_deferment(void); -#endif - -#ifdef CONFIG_SCHED_AUTOGROUP -extern void sched_autogroup_create_attach(struct task_struct *p); -extern void sched_autogroup_detach(struct task_struct *p); -extern void sched_autogroup_fork(struct signal_struct *sig); -extern void sched_autogroup_exit(struct signal_struct *sig); -extern void sched_autogroup_exit_task(struct task_struct *p); -#ifdef CONFIG_PROC_FS -extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); -extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); -#endif -#else -static inline void sched_autogroup_create_attach(struct task_struct *p) { } -static inline void sched_autogroup_detach(struct task_struct *p) { } -static inline void sched_autogroup_fork(struct signal_struct *sig) { } -static inline void sched_autogroup_exit(struct signal_struct *sig) { } -static inline void sched_autogroup_exit_task(struct task_struct *p) { } -#endif - extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); + /** * task_nice - return the nice value of a given task. * @p: the task in question. @@ -2629,16 +1327,15 @@ static inline int task_nice(const struct task_struct *p) { return PRIO_TO_NICE((p)->static_prio); } + extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); -extern int sched_setscheduler(struct task_struct *, int, - const struct sched_param *); -extern int sched_setscheduler_nocheck(struct task_struct *, int, - const struct sched_param *); -extern int sched_setattr(struct task_struct *, - const struct sched_attr *); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); + /** * is_idle_task - is the specified task an idle task? * @p: the task in question. 
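The kernel-side sched_setscheduler*() declarations above back the identically named userspace API; a short sketch of switching the calling thread to SCHED_FIFO (the priority value is arbitrary, and RT policies require CAP_SYS_NICE):

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
    	struct sched_param sp = { .sched_priority = 10 };

    	/* A pid of 0 means "the calling thread": */
    	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
    		perror("sched_setscheduler");
    		return 1;
    	}
    	printf("now SCHED_FIFO, priority %d\n", sp.sched_priority);
    	return 0;
    }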
@@ -2649,6 +1346,7 @@ static inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } + extern struct task_struct *curr_task(int cpu); extern void ia64_set_curr_task(int cpu, struct task_struct *p); @@ -2661,23 +1359,15 @@ union thread_union { unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -#ifndef __HAVE_ARCH_KSTACK_END -static inline int kstack_end(void *addr) +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline struct thread_info *task_thread_info(struct task_struct *task) { - /* Reliable end of stack detection: - * Some APM bios versions misalign the stack - */ - return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); + return &task->thread_info; } +#elif !defined(__HAVE_THREAD_FUNCTIONS) +# define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif -extern union thread_union init_thread_union; -extern struct task_struct init_task; - -extern struct mm_struct init_mm; - -extern struct pid_namespace init_pid_ns; - /* * find a task by one of its numerical ids * @@ -2690,322 +1380,25 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); -extern struct task_struct *find_task_by_pid_ns(pid_t nr, - struct pid_namespace *ns); - -/* per-UID process charging. */ -extern struct user_struct * alloc_uid(kuid_t); -static inline struct user_struct *get_uid(struct user_struct *u) -{ - atomic_inc(&u->__count); - return u; -} -extern void free_uid(struct user_struct *); - -#include <asm/current.h> - -extern void xtime_update(unsigned long ticks); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); -#ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -#else - static inline void kick_process(struct task_struct *tsk) { } -#endif -extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_dead(struct task_struct *p); - -extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void ignore_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); - -static inline int kernel_dequeue_signal(siginfo_t *info) -{ - struct task_struct *tsk = current; - siginfo_t __info; - int ret; - - spin_lock_irq(&tsk->sighand->siglock); - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); - spin_unlock_irq(&tsk->sighand->siglock); - - return ret; -} - -static inline void kernel_signal_stop(void) -{ - spin_lock_irq(¤t->sighand->siglock); - if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); - spin_unlock_irq(¤t->sighand->siglock); - - schedule(); -} - -extern void release_task(struct task_struct * p); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); -extern int force_sigsegv(int, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, - const struct cred *, u32); -extern int kill_pgrp(struct pid *pid, int sig, int priv); -extern int kill_pid(struct pid *pid, int 
sig, int priv); -extern int kill_proc_info(int, struct siginfo *, pid_t); -extern __must_check bool do_notify_parent(struct task_struct *, int); -extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); -extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); -extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); - -#ifdef TIF_RESTORE_SIGMASK -/* - * Legacy restore_sigmask accessors. These are inefficient on - * SMP architectures because they require atomic operations. - */ - -/** - * set_restore_sigmask() - make sure saved_sigmask processing gets done - * - * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code - * will run before returning to user mode, to process the flag. For - * all callers, TIF_SIGPENDING is already set or it's no harm to set - * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the - * arch code will notice on return to user mode, in case those bits - * are scarce. We set TIF_SIGPENDING here to ensure that the arch - * signal code always gets run when TIF_RESTORE_SIGMASK is set. - */ -static inline void set_restore_sigmask(void) -{ - set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - clear_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_restore_sigmask(void) -{ - return test_thread_flag(TIF_RESTORE_SIGMASK); -} -static inline bool test_and_clear_restore_sigmask(void) -{ - return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); -} - -#else /* TIF_RESTORE_SIGMASK */ - -/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */ -static inline void set_restore_sigmask(void) -{ - current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); -} -static inline void clear_restore_sigmask(void) -{ - current->restore_sigmask = false; -} -static inline bool test_restore_sigmask(void) -{ - return current->restore_sigmask; -} -static inline bool test_and_clear_restore_sigmask(void) -{ - if (!current->restore_sigmask) - return false; - current->restore_sigmask = false; - return true; -} -#endif - -static inline void restore_saved_sigmask(void) -{ - if (test_and_clear_restore_sigmask()) - __set_current_blocked(¤t->saved_sigmask); -} - -static inline sigset_t *sigmask_to_save(void) -{ - sigset_t *res = ¤t->blocked; - if (unlikely(test_restore_sigmask())) - res = ¤t->saved_sigmask; - return res; -} - -static inline int kill_cad_pid(int sig, int priv) -{ - return kill_pid(cad_pid, sig, priv); -} - -/* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) - -/* - * True if we are on the alternate signal stack. - */ -static inline int on_sig_stack(unsigned long sp) -{ - /* - * If the signal stack is SS_AUTODISARM then, by construction, we - * can't be on the signal stack unless user code deliberately set - * SS_AUTODISARM when we were already on it. - * - * This improves reliability: if user state gets corrupted such that - * the stack pointer points very close to the end of the signal stack, - * then this check will enable the signal to be handled anyway. 
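The restore_sigmask accessors above are driven by a fixed pattern in sigsuspend-style syscalls; roughly (a sketch abridged from the usual kernel/signal.c sequence, where newset is the incoming mask):

	current->saved_sigmask = current->blocked;
	set_current_blocked(&newset);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();	/* signal return path restores saved_sigmask */
	return -ERESTARTNOHAND;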
- */ - if (current->sas_ss_flags & SS_AUTODISARM) - return 0; - -#ifdef CONFIG_STACK_GROWSUP - return sp >= current->sas_ss_sp && - sp - current->sas_ss_sp < current->sas_ss_size; -#else - return sp > current->sas_ss_sp && - sp - current->sas_ss_sp <= current->sas_ss_size; -#endif -} - -static inline int sas_ss_flags(unsigned long sp) -{ - if (!current->sas_ss_size) - return SS_DISABLE; - return on_sig_stack(sp) ? SS_ONSTACK : 0; -} - -static inline void sas_ss_reset(struct task_struct *p) -{ - p->sas_ss_sp = 0; - p->sas_ss_size = 0; - p->sas_ss_flags = SS_DISABLE; -} - -static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) -{ - if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp)) -#ifdef CONFIG_STACK_GROWSUP - return current->sas_ss_sp; -#else - return current->sas_ss_sp + current->sas_ss_size; -#endif - return sp; -} - -/* - * Routines for handling mm_structs - */ -extern struct mm_struct * mm_alloc(void); - -/* mmdrop drops the mm and the page tables */ -extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); -} - -static inline void mmdrop_async_fn(struct work_struct *work) -{ - struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); - __mmdrop(mm); -} - -static inline void mmdrop_async(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) { - INIT_WORK(&mm->async_put_work, mmdrop_async_fn); - schedule_work(&mm->async_put_work); - } -} - -static inline bool mmget_not_zero(struct mm_struct *mm) -{ - return atomic_inc_not_zero(&mm->mm_users); -} - -/* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); -#ifdef CONFIG_MMU -/* same as above but performs the slow path from the async context. Can - * be called from the atomic context as well - */ -extern void mmput_async(struct mm_struct *); -#endif - -/* Grab a reference to a task's mm, if it is not already going away */ -extern struct mm_struct *get_task_mm(struct task_struct *task); -/* - * Grab a reference to a task's mm, if it is not already going away - * and ptrace_may_access with the mode parameter passed to it - * succeeds. - */ -extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct */ -extern void mm_release(struct task_struct *, struct mm_struct *); - -#ifdef CONFIG_HAVE_COPY_THREAD_TLS -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); -#else -extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); - -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. 
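sigsp() above is what architecture signal-delivery code uses to pick the frame base; a sketch (the register and frame names are illustrative, x86-flavoured, and not from this patch):

	unsigned long sp = sigsp(regs->sp, ksig);
	struct rt_sigframe __user *frame;

	/* carve the signal frame out of the chosen stack, 16-byte aligned */
	frame = (struct rt_sigframe __user *)((sp - sizeof(*frame)) & ~15UL);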
*/ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) -{ - return copy_thread(clone_flags, sp, arg, p); -} -#endif -extern void flush_thread(void); - -#ifdef CONFIG_HAVE_EXIT_THREAD -extern void exit_thread(struct task_struct *tsk); +#ifdef CONFIG_SMP +extern void kick_process(struct task_struct *tsk); #else -static inline void exit_thread(struct task_struct *tsk) -{ -} +static inline void kick_process(struct task_struct *tsk) { } #endif -extern void exit_files(struct task_struct *); -extern void __cleanup_sighand(struct sighand_struct *); - -extern void exit_itimers(struct signal_struct *); -extern void flush_itimer_signals(void); - -extern void do_group_exit(int); - -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); -struct task_struct *fork_idle(int); -extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); + static inline void set_task_comm(struct task_struct *tsk, const char *from) { __set_task_comm(tsk, from, false); } + extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP @@ -3013,260 +1406,15 @@ void scheduler_ipi(void); extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } -static inline unsigned long wait_task_inactive(struct task_struct *p, - long match_state) +static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { return 1; } #endif -#define tasklist_empty() \ - list_empty(&init_task.tasks) - -#define next_task(p) \ - list_entry_rcu((p)->tasks.next, struct task_struct, tasks) - -#define for_each_process(p) \ - for (p = &init_task ; (p = next_task(p)) != &init_task ; ) - -extern bool current_is_single_threaded(void); - -/* - * Careful: do_each_thread/while_each_thread is a double loop so - * 'break' will not work as expected - use goto instead. - */ -#define do_each_thread(g, t) \ - for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do - -#define while_each_thread(g, t) \ - while ((t = next_thread(t)) != g) - -#define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) - -#define for_each_thread(p, t) \ - __for_each_thread((p)->signal, t) - -/* Careful: this is a double loop, 'break' won't work as expected. */ -#define for_each_process_thread(p, t) \ - for_each_process(p) for_each_thread(p, t) - -static inline int get_nr_threads(struct task_struct *tsk) -{ - return tsk->signal->nr_threads; -} - -static inline bool thread_group_leader(struct task_struct *p) -{ - return p->exit_signal >= 0; -} - -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task. 
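The tasklist iteration macros being moved out above are used like this; note the double-loop caveat documented with them, so the sketch accumulates rather than breaking out (given some mm of interest, illustrative only):

	struct task_struct *g, *t;
	int nr_users = 0;

	rcu_read_lock();
	for_each_process_thread(g, t)
		if (t->mm == mm)	/* example predicate */
			nr_users++;
	rcu_read_unlock();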
- */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == p->signal->leader_pid; -} - -static inline -bool same_thread_group(struct task_struct *p1, struct task_struct *p2) -{ - return p1->signal == p2->signal; -} - -static inline struct task_struct *next_thread(const struct task_struct *p) -{ - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); -} - -static inline int thread_group_empty(struct task_struct *p) -{ - return list_empty(&p->thread_group); -} - -#define delay_group_leader(p) \ - (thread_group_leader(p) && !thread_group_empty(p)) - -/* - * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring - * subscriptions and synchronises with wait4(). Also used in procfs. Also - * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. - * - * Nests both inside and outside of read_lock(&tasklist_lock). - * It must not be nested with write_lock_irq(&tasklist_lock), - * neither inside nor outside. - */ -static inline void task_lock(struct task_struct *p) -{ - spin_lock(&p->alloc_lock); -} - -static inline void task_unlock(struct task_struct *p) -{ - spin_unlock(&p->alloc_lock); -} - -extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, - unsigned long *flags); - -static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - struct sighand_struct *ret; - - ret = __lock_task_sighand(tsk, flags); - (void)__cond_lock(&tsk->sighand->siglock, ret); - return ret; -} - -static inline void unlock_task_sighand(struct task_struct *tsk, - unsigned long *flags) -{ - spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); -} - -/** - * threadgroup_change_begin - mark the beginning of changes to a threadgroup - * @tsk: task causing the changes - * - * All operations which modify a threadgroup - a new thread joining the - * group, death of a member thread (the assertion of PF_EXITING) and - * exec(2) dethreading the process and replacing the leader - are wrapped - * by threadgroup_change_{begin|end}(). This is to provide a place which - * subsystems needing threadgroup stability can hook into for - * synchronization. - */ -static inline void threadgroup_change_begin(struct task_struct *tsk) -{ - might_sleep(); - cgroup_threadgroup_change_begin(tsk); -} - -/** - * threadgroup_change_end - mark the end of changes to a threadgroup - * @tsk: task causing the changes - * - * See threadgroup_change_begin(). - */ -static inline void threadgroup_change_end(struct task_struct *tsk) -{ - cgroup_threadgroup_change_end(tsk); -} - -#ifdef CONFIG_THREAD_INFO_IN_TASK - -static inline struct thread_info *task_thread_info(struct task_struct *task) -{ - return &task->thread_info; -} - -/* - * When accessing the stack of a non-current task that might exit, use - * try_get_task_stack() instead. task_stack_page will return a pointer - * that could get freed out from under you. 
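lock_task_sighand() above returns NULL once the task is being released, so callers take this shape (sketch):

	unsigned long flags;

	if (lock_task_sighand(tsk, &flags)) {
		/* tsk->sighand and tsk->signal are stable here */
		unlock_task_sighand(tsk, &flags);
	}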
- */ -static inline void *task_stack_page(const struct task_struct *task) -{ - return task->stack; -} - -#define setup_thread_stack(new,old) do { } while(0) - -static inline unsigned long *end_of_stack(const struct task_struct *task) -{ - return task->stack; -} - -#elif !defined(__HAVE_THREAD_FUNCTIONS) - -#define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((void *)(task)->stack) - -static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) -{ - *task_thread_info(p) = *task_thread_info(org); - task_thread_info(p)->task = p; -} - /* - * Return the address of the last usable long on the stack. - * - * When the stack grows down, this is just above the thread - * info struct. Going any lower will corrupt the threadinfo. - * - * When the stack grows up, this is the highest address. - * Beyond that position, we corrupt data on the next page. - */ -static inline unsigned long *end_of_stack(struct task_struct *p) -{ -#ifdef CONFIG_STACK_GROWSUP - return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; -#else - return (unsigned long *)(task_thread_info(p) + 1); -#endif -} - -#endif - -#ifdef CONFIG_THREAD_INFO_IN_TASK -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return atomic_inc_not_zero(&tsk->stack_refcount) ? - task_stack_page(tsk) : NULL; -} - -extern void put_task_stack(struct task_struct *tsk); -#else -static inline void *try_get_task_stack(struct task_struct *tsk) -{ - return task_stack_page(tsk); -} - -static inline void put_task_stack(struct task_struct *tsk) {} -#endif - -#define task_stack_end_corrupted(task) \ - (*(end_of_stack(task)) != STACK_END_MAGIC) - -static inline int object_is_on_stack(void *obj) -{ - void *stack = task_stack_page(current); - - return (obj >= stack) && (obj < (stack + THREAD_SIZE)); -} - -extern void thread_stack_cache_init(void); - -#ifdef CONFIG_DEBUG_STACK_USAGE -static inline unsigned long stack_not_used(struct task_struct *p) -{ - unsigned long *n = end_of_stack(p); - - do { /* Skip over canary */ -# ifdef CONFIG_STACK_GROWSUP - n--; -# else - n++; -# endif - } while (!*n); - -# ifdef CONFIG_STACK_GROWSUP - return (unsigned long)end_of_stack(p) - (unsigned long)n; -# else - return (unsigned long)n - (unsigned long)end_of_stack(p); -# endif -} -#endif -extern void set_task_stack_end_magic(struct task_struct *tsk); - -/* set thread flags in other task's structures - * - see asm/thread_info.h for TIF_xxxx flags available + * Set thread flags in other task's structures. 
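try_get_task_stack()/put_task_stack() above bracket any access to a non-current task's stack, e.g. (a sketch; the consumer function is hypothetical):

	void *stack = try_get_task_stack(tsk);

	if (stack) {
		/* the stack cannot be freed while we hold the reference */
		walk_stack(tsk, stack);	/* hypothetical consumer */
		put_task_stack(tsk);
	}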
+ * See asm/thread_info.h for TIF_xxxx flags available: */ static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) { @@ -3308,37 +1456,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } -static inline int restart_syscall(void) -{ - set_tsk_thread_flag(current, TIF_SIGPENDING); - return -ERESTARTNOINTR; -} - -static inline int signal_pending(struct task_struct *p) -{ - return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); -} - -static inline int __fatal_signal_pending(struct task_struct *p) -{ - return unlikely(sigismember(&p->pending.signal, SIGKILL)); -} - -static inline int fatal_signal_pending(struct task_struct *p) -{ - return signal_pending(p) && __fatal_signal_pending(p); -} - -static inline int signal_pending_state(long state, struct task_struct *p) -{ - if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) - return 0; - if (!signal_pending(p)) - return 0; - - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); -} - /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -3380,15 +1497,6 @@ static inline void cond_resched_rcu(void) #endif } -static inline unsigned long get_preempt_disable_ip(struct task_struct *p) -{ -#ifdef CONFIG_DEBUG_PREEMPT - return p->preempt_disable_ip; -#else - return 0; -#endif -} - /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, @@ -3403,114 +1511,12 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } -/* - * Idle thread specific functions to determine the need_resched - * polling state. - */ -#ifdef TIF_POLLING_NRFLAG -static inline int tsk_is_polling(struct task_struct *p) -{ - return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -} - -static inline void __current_set_polling(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_set_polling_and_test(void) -{ - __current_set_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -static inline void __current_clr_polling(void) -{ - clear_thread_flag(TIF_POLLING_NRFLAG); -} - -static inline bool __must_check current_clr_polling_and_test(void) -{ - __current_clr_polling(); - - /* - * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_curr() - */ - smp_mb__after_atomic(); - - return unlikely(tif_need_resched()); -} - -#else -static inline int tsk_is_polling(struct task_struct *p) { return 0; } -static inline void __current_set_polling(void) { } -static inline void __current_clr_polling(void) { } - -static inline bool __must_check current_set_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -static inline bool __must_check current_clr_polling_and_test(void) -{ - return unlikely(tif_need_resched()); -} -#endif - -static inline void current_clr_polling(void) -{ - __current_clr_polling(); - - /* - * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. - * Once the bit is cleared, we'll get IPIs with every new - * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also - * fold. - */ - smp_mb(); /* paired with resched_curr() */ - - preempt_fold_need_resched(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); } /* - * Thread group CPU time accounting. 
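The signal_pending()/fatal_signal_pending() tests removed above combine with cond_resched() in the classic long-loop shape (a sketch; process_one() is a hypothetical unit of work):

	for (i = 0; i < nr_items; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;
		process_one(i);		/* hypothetical per-item work */
		cond_resched();		/* safe preemption point */
	}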
- */ -void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - -/* - * Reevaluate whether the task has signals pending delivery. - * Wake the task if so. - * This is required every time the blocked sigset_t changes. - * callers must hold sighand->siglock. - */ -extern void recalc_sigpending_and_wake(struct task_struct *t); -extern void recalc_sigpending(void); - -extern void signal_wake_up_state(struct task_struct *t, unsigned int state); - -static inline void signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); -} -static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) -{ - signal_wake_up_state(t, resume ? __TASK_TRACED : 0); -} - -/* * Wrappers for p->thread_info->cpu access. No-op on UP. */ #ifdef CONFIG_SMP @@ -3524,11 +1530,6 @@ static inline unsigned int task_cpu(const struct task_struct *p) #endif } -static inline int task_node(const struct task_struct *p) -{ - return cpu_to_node(task_cpu(p)); -} - extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else @@ -3559,100 +1560,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -#ifdef CONFIG_CGROUP_SCHED -extern struct task_group root_task_group; -#endif /* CONFIG_CGROUP_SCHED */ - -extern int task_can_switch_user(struct user_struct *up, - struct task_struct *tsk); - -#ifdef CONFIG_TASK_XACCT -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.rchar += amt; -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ - tsk->ioac.wchar += amt; -} - -static inline void inc_syscr(struct task_struct *tsk) -{ - tsk->ioac.syscr++; -} - -static inline void inc_syscw(struct task_struct *tsk) -{ - tsk->ioac.syscw++; -} -#else -static inline void add_rchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void add_wchar(struct task_struct *tsk, ssize_t amt) -{ -} - -static inline void inc_syscr(struct task_struct *tsk) -{ -} - -static inline void inc_syscw(struct task_struct *tsk) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif -#ifdef CONFIG_MEMCG -extern void mm_update_next_owner(struct mm_struct *mm); -#else -static inline void mm_update_next_owner(struct mm_struct *mm) -{ -} -#endif /* CONFIG_MEMCG */ - -static inline unsigned long task_rlimit(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); -} - -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, - unsigned int limit) -{ - return READ_ONCE(tsk->signal->rlim[limit].rlim_max); -} - -static inline unsigned long rlimit(unsigned int limit) -{ - return task_rlimit(current, limit); -} - -static inline unsigned long rlimit_max(unsigned int limit) -{ - return task_rlimit_max(current, limit); -} - -#define SCHED_CPUFREQ_RT (1U << 0) -#define SCHED_CPUFREQ_DL (1U << 1) -#define SCHED_CPUFREQ_IOWAIT (1U << 2) - -#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) - -#ifdef CONFIG_CPU_FREQ -struct update_util_data { - void (*func)(struct update_util_data *data, u64 time, unsigned int flags); -}; - -void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned int flags)); -void 
cpufreq_remove_update_util_hook(int cpu); -#endif /* CONFIG_CPU_FREQ */ - #endif diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h new file mode 100644 index 000000000000..55cd496df884 --- /dev/null +++ b/include/linux/sched/autogroup.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_SCHED_AUTOGROUP_H +#define _LINUX_SCHED_AUTOGROUP_H + +struct signal_struct; +struct task_struct; +struct task_group; +struct seq_file; + +#ifdef CONFIG_SCHED_AUTOGROUP +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); +#ifdef CONFIG_PROC_FS +extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); +extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); +#endif +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } +#endif + +#ifdef CONFIG_CGROUP_SCHED +extern struct task_group root_task_group; +#endif /* CONFIG_CGROUP_SCHED */ + +#endif /* _LINUX_SCHED_AUTOGROUP_H */ diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h new file mode 100644 index 000000000000..4a68c6791207 --- /dev/null +++ b/include/linux/sched/clock.h @@ -0,0 +1,104 @@ +#ifndef _LINUX_SCHED_CLOCK_H +#define _LINUX_SCHED_CLOCK_H + +#include <linux/smp.h> + +/* + * Do not use outside of architecture code which knows its limitations. + * + * sched_clock() has no promise of monotonicity or bounded drift between + * CPUs, use (which you should not) requires disabling IRQs. + * + * Please use one of the three interfaces below. + */ +extern unsigned long long notrace sched_clock(void); + +/* + * See the comment in kernel/sched/clock.c + */ +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); + +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +static inline void sched_clock_init_late(void) +{ +} + +static inline void sched_clock_tick(void) +{ +} + +static inline void clear_sched_clock_stable(void) +{ +} + +static inline void sched_clock_idle_sleep_event(void) +{ +} + +static inline void sched_clock_idle_wakeup_event(u64 delta_ns) +{ +} + +static inline u64 cpu_clock(int cpu) +{ + return sched_clock(); +} + +static inline u64 local_clock(void) +{ + return sched_clock(); +} +#else +extern void sched_clock_init_late(void); +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + +extern void sched_clock_tick(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(u64 delta_ns); + +/* + * As outlined in clock.c, provides a fast, high resolution, nanosecond + * time source that is monotonic per cpu argument and has bounded drift + * between cpus. 
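+ *
+ * Usage sketch (the timed work is illustrative, not part of this patch):
+ *
+ *	u64 t0 = local_clock();
+ *	do_work();
+ *	u64 delta_ns = local_clock() - t0;	(same-CPU delta, in ns)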
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !!                                                  #
+ * ####################################################################
+ */
+static inline u64 cpu_clock(int cpu)
+{
+	return sched_clock_cpu(cpu);
+}
+
+static inline u64 local_clock(void)
+{
+	return sched_clock_cpu(raw_smp_processor_id());
+}
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An interface for runtime opt-in of IRQ time accounting, based on
+ * sched_clock. The opt-in is explicit so that platforms with slow
+ * sched_clock() implementations do not pay the accounting overhead
+ * unless they ask for it.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
+#endif /* _LINUX_SCHED_CLOCK_H */
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
new file mode 100644
index 000000000000..69eedcef8f03
--- /dev/null
+++ b/include/linux/sched/coredump.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_SCHED_COREDUMP_H
+#define _LINUX_SCHED_COREDUMP_H
+
+#include <linux/mm_types.h>
+
+#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
+#define SUID_DUMP_USER		1	/* Dump as user of process */
+#define SUID_DUMP_ROOT		2	/* Dump as root */
+
+/* mm flags */
+
+/* for SUID_DUMP_* above */
+#define MMF_DUMPABLE_BITS	2
+#define MMF_DUMPABLE_MASK	((1 << MMF_DUMPABLE_BITS) - 1)
+
+extern void set_dumpable(struct mm_struct *mm, int value);
+/*
+ * This returns the actual value of the suid_dumpable flag. For things
+ * that are using this for checking for privilege transitions, it must
+ * test against SUID_DUMP_USER rather than treating it as a boolean
+ * value.
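+ *
+ * Illustrative use at a privilege boundary (a sketch modeled on the
+ * ptrace-style callers, not code from this patch): deny access when
+ * the target has changed credentials:
+ *
+ *	if (mm && get_dumpable(mm) != SUID_DUMP_USER)
+ *		return -EPERM;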
+ */ +static inline int __get_dumpable(unsigned long mm_flags) +{ + return mm_flags & MMF_DUMPABLE_MASK; +} + +static inline int get_dumpable(struct mm_struct *mm) +{ + return __get_dumpable(mm->flags); +} + +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) + +#endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h new file mode 100644 index 000000000000..d2be2ccbb372 --- /dev/null +++ b/include/linux/sched/cpufreq.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_SCHED_CPUFREQ_H +#define _LINUX_SCHED_CPUFREQ_H + +#include <linux/types.h> + +/* + * Interface between cpufreq drivers and the scheduler: + */ + +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + +#ifdef CONFIG_CPU_FREQ +struct update_util_data { + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); +}; + +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); +void cpufreq_remove_update_util_hook(int cpu); +#endif /* CONFIG_CPU_FREQ */ + +#endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h new file mode 100644 index 000000000000..4c5b9735c1ae --- /dev/null +++ b/include/linux/sched/cputime.h @@ -0,0 +1,187 @@ +#ifndef _LINUX_SCHED_CPUTIME_H +#define _LINUX_SCHED_CPUTIME_H + +#include <linux/sched/signal.h> + +/* + * cputime accounting APIs: + */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +#include <asm/cputime.h> + +#ifndef cputime_to_nsecs +# define cputime_to_nsecs(__ct) \ + (cputime_to_usecs(__ct) * NSEC_PER_USEC) +#endif +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + u64 *utime, u64 *stime); +extern u64 task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct 
task_struct *t, + u64 *utime, u64 *stime) +{ + *utime = t->utime; + *stime = t->stime; +} + +static inline u64 task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif + +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + *utimescaled = t->utimescaled; + *stimescaled = t->stimescaled; +} +#else +static inline void task_cputime_scaled(struct task_struct *t, + u64 *utimescaled, + u64 *stimescaled) +{ + task_cputime(t, utimescaled, stimescaled); +} +#endif + +extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); +extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st); + + +/* + * Thread group CPU time accounting. + */ +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); + + +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +/** + * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * + * @tsk: Pointer to target task. + */ +#ifdef CONFIG_POSIX_TIMERS +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; + + /* Check if cputimer isn't running. This is accessed without locking. */ + if (!READ_ONCE(cputimer->running)) + return NULL; + + /* + * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime + * in __exit_signal(), we won't account to the signal struct further + * cputime consumed by that task, even though the task can still be + * ticking after __exit_signal(). + * + * In order to keep a consistent behaviour between thread group cputime + * and thread group cputimer accounting, lets also ignore the cputime + * elapsing after __exit_signal() in any thread group timer running. + * + * This makes sure that POSIX CPU clocks and timers are synchronized, so + * that a POSIX CPU timer won't expire while the corresponding POSIX CPU + * clock delta is behind the expiring timer value. + */ + if (unlikely(!tsk->sighand)) + return NULL; + + return cputimer; +} +#else +static inline +struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) +{ + return NULL; +} +#endif + +/** + * account_group_user_time - Maintain utime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void account_group_user_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.utime); +} + +/** + * account_group_system_time - Maintain stime for a thread group. + * + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. 
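+ *
+ * Usage sketch (illustrative; the real callers live in the cputime
+ * accounting path, charging roughly one tick's worth of system time):
+ *
+ *	account_group_system_time(p, cputime);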
+ */ +static inline void account_group_system_time(struct task_struct *tsk, + u64 cputime) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(cputime, &cputimer->cputime_atomic.stime); +} + +/** + * account_group_exec_runtime - Maintain exec runtime for a thread group. + * + * @tsk: Pointer to task structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of the thread_group_cputime structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. + */ +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct thread_group_cputimer *cputimer = get_running_cputimer(tsk); + + if (!cputimer) + return; + + atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime); +} + +static inline void prev_cputime_init(struct prev_cputime *prev) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + prev->utime = prev->stime = 0; + raw_spin_lock_init(&prev->lock); +#endif +} + +extern unsigned long long +task_sched_runtime(struct task_struct *task); + +#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9089a2ae913d..975be862e083 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,5 +1,7 @@ -#ifndef _SCHED_DEADLINE_H -#define _SCHED_DEADLINE_H +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H + +#include <linux/sched.h> /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -26,4 +28,4 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } -#endif /* _SCHED_DEADLINE_H */ +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h new file mode 100644 index 000000000000..e0eaee54c5a4 --- /dev/null +++ b/include/linux/sched/debug.h @@ -0,0 +1,50 @@ +#ifndef _LINUX_SCHED_DEBUG_H +#define _LINUX_SCHED_DEBUG_H + +/* + * Various scheduler/task debugging interfaces: + */ + +struct task_struct; + +extern void dump_cpu_task(int cpu); + +/* + * Only dump TASK_* tasks. (0 for all tasks) + */ +extern void show_state_filter(unsigned long state_filter); + +static inline void show_state(void) +{ + show_state_filter(0); +} + +struct pt_regs; + +extern void show_regs(struct pt_regs *); + +/* + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current + * task), SP is the stack pointer of the first frame that should be shown in the back + * trace (or NULL if the entire call-chain of the task should be shown). + */ +extern void show_stack(struct task_struct *task, unsigned long *sp); + +extern void sched_show_task(struct task_struct *p); + +#ifdef CONFIG_SCHED_DEBUG +struct seq_file; +extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); +extern void proc_sched_set_task(struct task_struct *p); +#endif + +/* Attach to any functions which should be ignored in wchan output. */ +#define __sched __attribute__((__section__(".sched.text"))) + +/* Linker adds these: start and end of __sched functions */ +extern char __sched_text_start[], __sched_text_end[]; + +/* Is this address in the __sched functions? 
*/ +extern int in_sched_functions(unsigned long addr); + +#endif /* _LINUX_SCHED_DEBUG_H */ diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h new file mode 100644 index 000000000000..752ac7e628d7 --- /dev/null +++ b/include/linux/sched/hotplug.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SCHED_HOTPLUG_H +#define _LINUX_SCHED_HOTPLUG_H + +/* + * Scheduler interfaces for hotplug CPU support: + */ + +extern int sched_cpu_starting(unsigned int cpu); +extern int sched_cpu_activate(unsigned int cpu); +extern int sched_cpu_deactivate(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_cpu_dying(unsigned int cpu); +#else +# define sched_cpu_dying NULL +#endif + +#ifdef CONFIG_HOTPLUG_CPU +extern void idle_task_exit(void); +#else +static inline void idle_task_exit(void) {} +#endif + +#endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h new file mode 100644 index 000000000000..5ca63ebad6b4 --- /dev/null +++ b/include/linux/sched/idle.h @@ -0,0 +1,86 @@ +#ifndef _LINUX_SCHED_IDLE_H +#define _LINUX_SCHED_IDLE_H + +#include <linux/sched.h> + +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES +}; + +extern void wake_up_if_idle(int cpu); + +/* + * Idle thread specific functions to determine the need_resched + * polling state. + */ +#ifdef TIF_POLLING_NRFLAG + +static inline void __current_set_polling(void) +{ + set_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_set_polling_and_test(void) +{ + __current_set_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +static inline void __current_clr_polling(void) +{ + clear_thread_flag(TIF_POLLING_NRFLAG); +} + +static inline bool __must_check current_clr_polling_and_test(void) +{ + __current_clr_polling(); + + /* + * Polling state must be visible before we test NEED_RESCHED, + * paired by resched_curr() + */ + smp_mb__after_atomic(); + + return unlikely(tif_need_resched()); +} + +#else +static inline void __current_set_polling(void) { } +static inline void __current_clr_polling(void) { } + +static inline bool __must_check current_set_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +static inline bool __must_check current_clr_polling_and_test(void) +{ + return unlikely(tif_need_resched()); +} +#endif + +static inline void current_clr_polling(void) +{ + __current_clr_polling(); + + /* + * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. + * Once the bit is cleared, we'll get IPIs with every new + * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also + * fold. 
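+ *
+ * For context, the idle loop brackets polling roughly like this
+ * (a sketch of the do_idle() pattern, abridged):
+ *
+ *	__current_set_polling();
+ *	while (!need_resched())
+ *		arch_cpu_idle();	(or the cpuidle governor path)
+ *	__current_clr_polling();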
+	 */
+	smp_mb(); /* paired with resched_curr() */
+
+	preempt_fold_need_resched();
+}
+
+#endif /* _LINUX_SCHED_IDLE_H */
diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h
new file mode 100644
index 000000000000..127215045285
--- /dev/null
+++ b/include/linux/sched/init.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_SCHED_INIT_H
+#define _LINUX_SCHED_INIT_H
+
+/*
+ * Scheduler init related prototypes:
+ */
+
+extern void sched_init(void);
+extern void sched_init_smp(void);
+
+#endif /* _LINUX_SCHED_INIT_H */
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
new file mode 100644
index 000000000000..016afa0fb3bb
--- /dev/null
+++ b/include/linux/sched/jobctl.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_SCHED_JOBCTL_H
+#define _LINUX_SCHED_JOBCTL_H
+
+#include <linux/types.h>
+
+struct task_struct;
+
+/*
+ * task->jobctl flags
+ */
+#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
+#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask);
+
+#endif /* _LINUX_SCHED_JOBCTL_H */
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
new file mode 100644
index 000000000000..4264bc6b2c27
--- /dev/null
+++ b/include/linux/sched/loadavg.h
@@ -0,0 +1,31 @@
+#ifndef _LINUX_SCHED_LOADAVG_H
+#define _LINUX_SCHED_LOADAVG_H
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ *    a load-average precision of 10 bits integer + 11 bits fractional
+ *  - if you want to count load-averages more often, you need more
+ *    precision, or rounding will get you. With 2-second counting freq,
+ *    the EXP_n values would be 1981, 2034 and 2043 if still using only
+ *    11 bit fractions.
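+ *
+ * For reference, this is how /proc/loadavg decodes avenrun[] into the
+ * familiar "0.42" style numbers (a sketch of the fs/proc decode, shown
+ * here for illustration only):
+ *
+ *	#define LOAD_INT(x)  ((x) >> FSHIFT)
+ *	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
+ *
+ *	unsigned long loads[3];
+ *	get_avenrun(loads, FIXED_1 / 200, 0);
+ *	pr_info("load: %lu.%02lu\n", LOAD_INT(loads[0]), LOAD_FRAC(loads[0]));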
+ */
+extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+
+#define FSHIFT		11		/* nr of bits of precision */
+#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
+#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
+#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5		2014		/* 1/exp(5sec/5min) */
+#define EXP_15		2037		/* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+	load *= exp; \
+	load += n*(FIXED_1-exp); \
+	load >>= FSHIFT;
+
+extern void calc_global_load(unsigned long ticks);
+
+#endif /* _LINUX_SCHED_LOADAVG_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
new file mode 100644
index 000000000000..830953ebb391
--- /dev/null
+++ b/include/linux/sched/mm.h
@@ -0,0 +1,174 @@
+#ifndef _LINUX_SCHED_MM_H
+#define _LINUX_SCHED_MM_H
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/gfp.h>
+
+/*
+ * Routines for handling mm_structs
+ */
+extern struct mm_struct * mm_alloc(void);
+
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_count);
+}
+
+/* mmdrop drops the mm and the page tables */
+extern void __mmdrop(struct mm_struct *);
+static inline void mmdrop(struct mm_struct *mm)
+{
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+		__mmdrop(mm);
+}
+
+static inline void mmdrop_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+	__mmdrop(mm);
+}
+
+static inline void mmdrop_async(struct mm_struct *mm)
+{
+	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
+		schedule_work(&mm->async_put_work);
+	}
+}
+
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_users);
+}
+
+static inline bool mmget_not_zero(struct mm_struct *mm)
+{
+	return atomic_inc_not_zero(&mm->mm_users);
+}
+
+/* mmput gets rid of the mappings and all user-space */
+extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context.
Can + * be called from the atomic context as well + */ +extern void mmput_async(struct mm_struct *); +#endif + +/* Grab a reference to a task's mm, if it is not already going away */ +extern struct mm_struct *get_task_mm(struct task_struct *task); +/* + * Grab a reference to a task's mm, if it is not already going away + * and ptrace_may_access with the mode parameter passed to it + * succeeds. + */ +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); + +#ifdef CONFIG_MEMCG +extern void mm_update_next_owner(struct mm_struct *mm); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} +#endif /* CONFIG_MEMCG */ + +#ifdef CONFIG_MMU +extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); +#else +static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +#endif + +static inline bool in_vfork(struct task_struct *tsk) +{ + bool ret; + + /* + * need RCU to access ->real_parent if CLONE_VM was used along with + * CLONE_PARENT. + * + * We check real_parent->mm == tsk->mm because CLONE_VFORK does not + * imply CLONE_VM + * + * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus + * ->real_parent is not necessarily the task doing vfork(), so in + * theory we can't rely on task_lock() if we want to dereference it. + * + * And in this case we can't trust the real_parent->mm == tsk->mm + * check, it can be false negative. But we do not care, if init or + * another oom-unkillable task does this it should blame itself. + */ + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. 
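+ *
+ * Typical use of the save/restore pair below, from a context that must
+ * not recurse into I/O (a sketch; the allocation is illustrative):
+ *
+ *	unsigned int noio_flags = memalloc_noio_save();
+ *	addr = kmalloc(size, GFP_KERNEL);	(implicitly degraded to no-I/O)
+ *	memalloc_noio_restore(noio_flags);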
+ */ +static inline gfp_t memalloc_noio_flags(gfp_t flags) +{ + if (unlikely(current->flags & PF_MEMALLOC_NOIO)) + flags &= ~(__GFP_IO | __GFP_FS); + return flags; +} + +static inline unsigned int memalloc_noio_save(void) +{ + unsigned int flags = current->flags & PF_MEMALLOC_NOIO; + current->flags |= PF_MEMALLOC_NOIO; + return flags; +} + +static inline void memalloc_noio_restore(unsigned int flags) +{ + current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; +} + +#endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h new file mode 100644 index 000000000000..4995b717500b --- /dev/null +++ b/include/linux/sched/nohz.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SCHED_NOHZ_H +#define _LINUX_SCHED_NOHZ_H + +/* + * This is the interface between the scheduler and nohz/dyntics: + */ + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void cpu_load_update_nohz_start(void); +extern void cpu_load_update_nohz_stop(void); +#else +static inline void cpu_load_update_nohz_start(void) { } +static inline void cpu_load_update_nohz_stop(void) { } +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern void nohz_balance_enter_idle(int cpu); +extern void set_cpu_sd_state_idle(void); +extern int get_nohz_timer_target(void); +#else +static inline void nohz_balance_enter_idle(int cpu) { } +static inline void set_cpu_sd_state_idle(void) { } +#endif + +#ifdef CONFIG_NO_HZ_COMMON +void calc_load_enter_idle(void); +void calc_load_exit_idle(void); +#else +static inline void calc_load_enter_idle(void) { } +static inline void calc_load_exit_idle(void) { } +#endif /* CONFIG_NO_HZ_COMMON */ + +#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +extern void wake_up_nohz_cpu(int cpu); +#else +static inline void wake_up_nohz_cpu(int cpu) { } +#endif + +#ifdef CONFIG_NO_HZ_FULL +extern u64 scheduler_tick_max_deferment(void); +#endif + +#endif /* _LINUX_SCHED_NOHZ_H */ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h new file mode 100644 index 000000000000..35d5fc77b4be --- /dev/null +++ b/include/linux/sched/numa_balancing.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_SCHED_NUMA_BALANCING_H +#define _LINUX_SCHED_NUMA_BALANCING_H + +/* + * This is the interface between the scheduler and the MM that + * implements memory access pattern based NUMA-balancing: + */ + +#include <linux/sched.h> + +#define TNF_MIGRATED 0x01 +#define TNF_NO_GROUP 0x02 +#define TNF_SHARED 0x04 +#define TNF_FAULT_LOCAL 0x08 +#define TNF_MIGRATE_FAIL 0x10 + +#ifdef CONFIG_NUMA_BALANCING +extern void task_numa_fault(int last_node, int node, int pages, int flags); +extern pid_t task_numa_group_id(struct task_struct *p); +extern void set_numabalancing_state(bool enabled); +extern void task_numa_free(struct task_struct *p); +extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, + int src_nid, int dst_cpu); +#else +static inline void task_numa_fault(int last_node, int node, int pages, + int flags) +{ +} +static inline pid_t task_numa_group_id(struct task_struct *p) +{ + return 0; +} +static inline void set_numabalancing_state(bool enabled) +{ +} +static inline void task_numa_free(struct task_struct *p) +{ +} +static inline bool should_numa_migrate_memory(struct task_struct *p, + struct page *page, int src_nid, int dst_cpu) +{ + return true; +} +#endif + +#endif /* _LINUX_SCHED_NUMA_BALANCING_H */ diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index d9cf5a5762d9..2cc450f6ec54 100644 --- 
a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -1,5 +1,5 @@ -#ifndef _SCHED_PRIO_H -#define _SCHED_PRIO_H +#ifndef _LINUX_SCHED_PRIO_H +#define _LINUX_SCHED_PRIO_H #define MAX_NICE 19 #define MIN_NICE -20 @@ -57,4 +57,4 @@ static inline long rlimit_to_nice(long prio) return (MAX_NICE - prio + 1); } -#endif /* _SCHED_PRIO_H */ +#endif /* _LINUX_SCHED_PRIO_H */ diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a30b172df6e1..3bd668414f61 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -1,7 +1,9 @@ -#ifndef _SCHED_RT_H -#define _SCHED_RT_H +#ifndef _LINUX_SCHED_RT_H +#define _LINUX_SCHED_RT_H -#include <linux/sched/prio.h> +#include <linux/sched.h> + +struct task_struct; static inline int rt_prio(int prio) { @@ -57,4 +59,4 @@ extern void normalize_rt_tasks(void); */ #define RR_TIMESLICE (100 * HZ / 1000) -#endif /* _SCHED_RT_H */ +#endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h new file mode 100644 index 000000000000..2cf446704cd4 --- /dev/null +++ b/include/linux/sched/signal.h @@ -0,0 +1,613 @@ +#ifndef _LINUX_SCHED_SIGNAL_H +#define _LINUX_SCHED_SIGNAL_H + +#include <linux/rculist.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/sched/jobctl.h> +#include <linux/sched/task.h> +#include <linux/cred.h> + +/* + * Types defining task->signal and task->sighand and APIs using them: + */ + +struct sighand_struct { + atomic_t count; + struct k_sigaction action[_NSIG]; + spinlock_t siglock; + wait_queue_head_t signalfd_wqh; +}; + +/* + * Per-process accounting stats: + */ +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + +/* + * This is the atomic variant of task_cputime, which can be used for + * storing and updating task_cputime statistics without locking. + */ +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; + +#define INIT_CPUTIME_ATOMIC \ + (struct task_cputime_atomic) { \ + .utime = ATOMIC64_INIT(0), \ + .stime = ATOMIC64_INIT(0), \ + .sum_exec_runtime = ATOMIC64_INIT(0), \ + } +/** + * struct thread_group_cputimer - thread group interval timer counts + * @cputime_atomic: atomic thread group interval timers. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU timer calculations. + */ +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; + bool running; + bool checking_timer; +}; + +/* + * NOTE! "signal_struct" does not have its own + * locking, because a shared signal_struct always + * implies a shared sighand_struct, so locking + * sighand_struct is always a proper superset of + * the locking of signal_struct. 
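+ *
+ * A typical reader therefore takes the shared lock (a sketch):
+ *
+ *	spin_lock_irq(&tsk->sighand->siglock);
+ *	... tsk->signal fields are stable here ...
+ *	spin_unlock_irq(&tsk->sighand->siglock);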
+ */ +struct signal_struct { + atomic_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; /* for wait4() */ + + /* current thread group signal load-balancing target: */ + struct task_struct *curr_target; + + /* shared signal handling: */ + struct sigpending shared_pending; + + /* thread group exit support */ + int group_exit_code; + /* overloaded: + * - notify group_exit_task when ->count is equal to notify_count + * - everyone except group_exit_task is stopped during signal delivery + * of fatal signals, group_exit_task processes the signal. + */ + int notify_count; + struct task_struct *group_exit_task; + + /* thread group stop support, overloads group_exit_code too */ + int group_stop_count; + unsigned int flags; /* see SIGNAL_* flags below */ + + /* + * PR_SET_CHILD_SUBREAPER marks a process, like a service + * manager, to re-parent orphan (double-forking) child processes + * to this process instead of 'init'. The service manager is + * able to receive SIGCHLD signals and is able to investigate + * the process until it calls wait(). All children of this + * process will inherit a flag if they should look for a + * child_subreaper process at exit. + */ + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + +#ifdef CONFIG_POSIX_TIMERS + + /* POSIX.1b Interval Timers */ + int posix_timer_id; + struct list_head posix_timers; + + /* ITIMER_REAL timer for the process */ + struct hrtimer real_timer; + ktime_t it_real_incr; + + /* + * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use + * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these + * values are defined to 0 and 1 respectively + */ + struct cpu_itimer it[2]; + + /* + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. + */ + struct thread_group_cputimer cputimer; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + +#endif + + struct pid *leader_pid; + +#ifdef CONFIG_NO_HZ_FULL + atomic_t tick_dep_mask; +#endif + + struct pid *tty_old_pgrp; + + /* boolean value for session group leader */ + int leader; + + struct tty_struct *tty; /* NULL if no tty */ + +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif + /* + * Cumulative resource counters for dead threads in the group, + * and for reaped dead child processes forked by this group. + * Live threads maintain their own counters and add to these + * in __exit_signal, except for the group leader. + */ + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* + * We don't bother to synchronize most readers of this at all, + * because there is no reader checking a limit that actually needs + * to get both rlim_cur and rlim_max atomically, and either one + * alone is a single word that can safely be read normally. 
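+ *
+ * (Lockless readers can thus simply do, much like the task_rlimit()
+ * helper, as a sketch:
+ *	soft = READ_ONCE(sig->rlim[RLIMIT_NOFILE].rlim_cur);
+ * without taking any lock.)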
+ * getrlimit/setrlimit use task_lock(current->group_leader) to + * protect this instead of the siglock, because they really + * have no need to disable irqs. + */ + struct rlimit rlim[RLIM_NLIMITS]; + +#ifdef CONFIG_BSD_PROCESS_ACCT + struct pacct_struct pacct; /* per-process accounting information */ +#endif +#ifdef CONFIG_TASKSTATS + struct taskstats *stats; +#endif +#ifdef CONFIG_AUDIT + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; +#endif + + /* + * Thread is the potential origin of an oom condition; kill first on + * oom + */ + bool oom_flag_origin; + short oom_score_adj; /* OOM kill score adjustment */ + short oom_score_adj_min; /* OOM kill score adjustment min value. + * Only settable by CAP_SYS_RESOURCE. */ + struct mm_struct *oom_mm; /* recorded mm when the thread group got + * killed by the oom killer */ + + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably ptrace) */ +}; + +/* + * Bits in flags field of signal_struct. + */ +#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ +#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */ +/* + * Pending notifications to parent. + */ +#define SIGNAL_CLD_STOPPED 0x00000010 +#define SIGNAL_CLD_CONTINUED 0x00000020 +#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) + +#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ + +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + +/* If true, all threads except ->group_exit_task have pending SIGKILL */ +static inline int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & SIGNAL_GROUP_EXIT) || + (sig->group_exit_task != NULL); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); + +static inline int kernel_dequeue_signal(siginfo_t *info) +{ + struct task_struct *tsk = current; + siginfo_t __info; + int ret; + + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); + + return ret; +} + +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(&current->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(&current->sighand->siglock); + + schedule(); +} +extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int force_sigsegv(int, struct task_struct *); +extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, + const struct cred *, u32); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern __must_check bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int, struct task_struct *); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + +static inline int signal_pending(struct task_struct *p) +{ + return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); +} + +static inline int __fatal_signal_pending(struct task_struct *p) +{ + return unlikely(sigismember(&p->pending.signal, SIGKILL)); +} + +static inline int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); +} + +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} + +#ifdef TIF_RESTORE_SIGMASK +/* + * Legacy restore_sigmask accessors. These are inefficient on + * SMP architectures because they require atomic operations. + */ + +/** + * set_restore_sigmask() - make sure saved_sigmask processing gets done + * + * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code + * will run before returning to user mode, to process the flag. For + * all callers, TIF_SIGPENDING is already set or it's no harm to set + * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the + * arch code will notice on return to user mode, in case those bits + * are scarce. We set TIF_SIGPENDING here to ensure that the arch + * signal code always gets run when TIF_RESTORE_SIGMASK is set. + */ +static inline void set_restore_sigmask(void) +{ + set_thread_flag(TIF_RESTORE_SIGMASK); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); +} + +#else /* TIF_RESTORE_SIGMASK */ + +/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. 
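[Editor's note: the pending-signal predicates above are normally consumed in wait loops. A minimal sketch of the usual pattern, assuming a hypothetical completion flag; this is illustrative, not part of the patch.]

/* Hypothetical wait: sleep until *done becomes true, but give up
 * with -EINTR once a fatal signal (SIGKILL) is queued. */
static int wait_for_done(bool *done)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		if (fatal_signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			return -EINTR;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}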
*/ +static inline void set_restore_sigmask(void) +{ + current->restore_sigmask = true; + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + current->restore_sigmask = false; +} +static inline bool test_restore_sigmask(void) +{ + return current->restore_sigmask; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + if (!current->restore_sigmask) + return false; + current->restore_sigmask = false; + return true; +} +#endif + +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(&current->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = &current->blocked; + if (unlikely(test_restore_sigmask())) + res = &current->saved_sigmask; + return res; +} + +static inline int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} + +/* These can be the second arg to send_sig_info/send_group_sig_info. */ +#define SEND_SIG_NOINFO ((struct siginfo *) 0) +#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_FORCED ((struct siginfo *) 2) + +/* + * True if we are on the alternate signal stack. + */ +static inline int on_sig_stack(unsigned long sp) +{ + /* + * If the signal stack is SS_AUTODISARM then, by construction, we + * can't be on the signal stack unless user code deliberately set + * SS_AUTODISARM when we were already on it. + * + * This improves reliability: if user state gets corrupted such that + * the stack pointer points very close to the end of the signal stack, + * then this check will enable the signal to be handled anyway. + */ + if (current->sas_ss_flags & SS_AUTODISARM) + return 0; + +#ifdef CONFIG_STACK_GROWSUP + return sp >= current->sas_ss_sp && + sp - current->sas_ss_sp < current->sas_ss_size; +#else + return sp > current->sas_ss_sp && + sp - current->sas_ss_sp <= current->sas_ss_size; +#endif +} + +static inline int sas_ss_flags(unsigned long sp) +{ + if (!current->sas_ss_size) + return SS_DISABLE; + + return on_sig_stack(sp) ? SS_ONSTACK : 0; +} + +static inline void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = SS_DISABLE; +} + +static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) +#ifdef CONFIG_STACK_GROWSUP + return current->sas_ss_sp; +#else + return current->sas_ss_sp + current->sas_ss_size; +#endif + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); + +#define tasklist_empty() \ + list_empty(&init_task.tasks) + +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) + +#define for_each_process(p) \ + for (p = &init_task ; (p = next_task(p)) != &init_task ; ) + +extern bool current_is_single_threaded(void); + +/* + * Careful: do_each_thread/while_each_thread is a double loop so + * 'break' will not work as expected - use goto instead. + */ +#define do_each_thread(g, t) \ + for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do + +#define while_each_thread(g, t) \ + while ((t = next_thread(t)) != g) + +#define __for_each_thread(signal, t) \ + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + +#define for_each_thread(p, t) \ + __for_each_thread((p)->signal, t) + +/* Careful: this is a double loop, 'break' won't work as expected.
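[Editor's note: because do_each_thread()/while_each_thread() expand to nested loops, a bare break only exits the inner loop. A hedged sketch of the goto pattern the comment calls for; target_pid and the predicate are hypothetical.]

struct task_struct *g, *t, *found = NULL;

read_lock(&tasklist_lock);
do_each_thread(g, t) {
	if (t->pid == target_pid) {	/* hypothetical predicate */
		found = t;
		goto out;	/* 'break' would only leave the inner loop */
	}
} while_each_thread(g, t);
out:
read_unlock(&tasklist_lock);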
*/ +#define for_each_process_thread(p, t) \ + for_each_process(p) for_each_thread(p, t) + +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +/* Due to the insanities of de_thread it is possible for a process + * to have the pid of the thread group leader without actually being + * the thread group leader. For iteration through the pids in proc + * all we care about is that we have a task with the appropriate + * pid; we don't actually care if we have the right task. + */ +static inline bool has_group_leader_pid(struct task_struct *p) +{ + return task_pid(p) == p->signal->leader_pid; +} + +static inline +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline struct task_struct *next_thread(const struct task_struct *p) +{ + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); +} + +static inline int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + +#define delay_group_leader(p) \ + (thread_group_leader(p) && !thread_group_empty(p)) + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, + unsigned long *flags); + +static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(tsk, flags); + (void)__cond_lock(&tsk->sighand->siglock, ret); + return ret; +} + +static inline void unlock_task_sighand(struct task_struct *tsk, + unsigned long *flags) +{ + spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); +} + +static inline unsigned long task_rlimit(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); +} + +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, + unsigned int limit) +{ + return READ_ONCE(tsk->signal->rlim[limit].rlim_max); +} + +static inline unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(current, limit); +} + +static inline unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(current, limit); +} + +#endif /* _LINUX_SCHED_SIGNAL_H */ diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h new file mode 100644 index 000000000000..141b74c53fad --- /dev/null +++ b/include/linux/sched/stat.h @@ -0,0 +1,40 @@ +#ifndef _LINUX_SCHED_STAT_H +#define _LINUX_SCHED_STAT_H + +#include <linux/percpu.h> + +/* + * Various counters maintained by the scheduler and fork(), + * exposed via /proc, sys.c or used by drivers via these APIs. + * + * ( Note that all these values are acquired without locking, + * so they can only be relied on in narrow circumstances.
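[Editor's note: the rlimit accessors above read a single word with READ_ONCE() and need no lock. A minimal usage sketch; the enclosing helper is hypothetical.]

/* Hypothetical check against the caller's soft RLIMIT_NOFILE. */
static bool over_file_limit(unsigned int nfiles)
{
	return nfiles >= rlimit(RLIMIT_NOFILE);
}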
) + */ + +extern unsigned long total_forks; +extern int nr_threads; +DECLARE_PER_CPU(unsigned long, process_counts); +extern int nr_processes(void); +extern unsigned long nr_running(void); +extern bool single_task_running(void); +extern unsigned long nr_iowait(void); +extern unsigned long nr_iowait_cpu(int cpu); +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); + +static inline int sched_info_on(void) +{ +#ifdef CONFIG_SCHEDSTATS + return 1; +#elif defined(CONFIG_TASK_DELAY_ACCT) + extern int delayacct_on; + return delayacct_on; +#else + return 0; +#endif +} + +#ifdef CONFIG_SCHEDSTATS +void force_schedstat_enabled(void); +#endif + +#endif /* _LINUX_SCHED_STAT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 49308e142aae..0f5ecd4d298e 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -1,5 +1,9 @@ -#ifndef _SCHED_SYSCTL_H -#define _SCHED_SYSCTL_H +#ifndef _LINUX_SCHED_SYSCTL_H +#define _LINUX_SCHED_SYSCTL_H + +#include <linux/types.h> + +struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK extern int sysctl_hung_task_check_count; @@ -78,4 +82,4 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -#endif /* _SCHED_SYSCTL_H */ +#endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h new file mode 100644 index 000000000000..a978d7189cfd --- /dev/null +++ b/include/linux/sched/task.h @@ -0,0 +1,139 @@ +#ifndef _LINUX_SCHED_TASK_H +#define _LINUX_SCHED_TASK_H + +/* + * Interface between the scheduler and various task lifetime (fork()/exit()) + * functionality: + */ + +#include <linux/sched.h> + +struct task_struct; +union thread_union; + +/* + * This serializes "schedule()" and also protects + * the run-queue from deletions/modifications (but + * _adding_ to the beginning of the run-queue has + * a separate lock). + */ +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + +#ifdef CONFIG_PROVE_RCU +extern int lockdep_tasklist_lock_is_held(void); +#endif /* #ifdef CONFIG_PROVE_RCU */ + +extern asmlinkage void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __noreturn do_task_dead(void); + +extern void proc_caches_init(void); + +extern void release_task(struct task_struct * p); + +#ifdef CONFIG_HAVE_COPY_THREAD_TLS +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +#else +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *); + +/* Architectures that haven't opted into copy_thread_tls get the tls argument + * via pt_regs, so ignore the tls argument passed via C. 
*/ +static inline int copy_thread_tls( + unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) +{ + return copy_thread(clone_flags, sp, arg, p); +} +#endif +extern void flush_thread(void); + +#ifdef CONFIG_HAVE_EXIT_THREAD +extern void exit_thread(struct task_struct *tsk); +#else +static inline void exit_thread(struct task_struct *tsk) +{ +} +#endif +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); +struct task_struct *fork_idle(int); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +extern void free_task(struct task_struct *tsk); + +/* sched_exec is called by processes performing an exec */ +#ifdef CONFIG_SMP +extern void sched_exec(void); +#else +#define sched_exec() {} +#endif + +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + +extern void __put_task_struct(struct task_struct *t); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +struct task_struct *task_rcu_dereference(struct task_struct **ptask); +struct task_struct *try_get_task_struct(struct task_struct **ptask); + + +#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT +extern int arch_task_struct_size __read_mostly; +#else +# define arch_task_struct_size (sizeof(struct task_struct)) +#endif + +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + +/* + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring + * subscriptions and synchronises with wait4(). Also used in procfs. Also + * pins the final release of task.io_context. Also protects ->cpuset and + * ->cgroup.subsys[]. And ->vfork_done. + * + * Nests both inside and outside of read_lock(&tasklist_lock). + * It must not be nested with write_lock_irq(&tasklist_lock), + * neither inside nor outside. + */ +static inline void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} + +#endif /* _LINUX_SCHED_TASK_H */ diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h new file mode 100644 index 000000000000..df6ea6665b31 --- /dev/null +++ b/include/linux/sched/task_stack.h @@ -0,0 +1,121 @@ +#ifndef _LINUX_SCHED_TASK_STACK_H +#define _LINUX_SCHED_TASK_STACK_H + +/* + * task->stack (kernel stack) handling interfaces: + */ + +#include <linux/sched.h> +#include <linux/magic.h> + +#ifdef CONFIG_THREAD_INFO_IN_TASK + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. 
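[Editor's note: a hedged sketch of the stack-pinning pattern described above, assuming CONFIG_THREAD_INFO_IN_TASK; the reporting helper is hypothetical.]

/* Pin @tsk's stack so task exit cannot free it while we look at it. */
static void report_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return;		/* stack already released */
	pr_info("%s/%d stack at %p\n", tsk->comm, tsk->pid, stack);
	put_task_stack(tsk);
}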
+ */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) + +#define task_stack_page(task) ((void *)(task)->stack) + +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) +{ + *task_thread_info(p) = *task_thread_info(org); + task_thread_info(p)->task = p; +} + +/* + * Return the address of the last usable long on the stack. + * + * When the stack grows down, this is just above the thread + * info struct. Going any lower will corrupt the threadinfo. + * + * When the stack grows up, this is the highest address. + * Beyond that position, we corrupt data on the next page. + */ +static inline unsigned long *end_of_stack(struct task_struct *p) +{ +#ifdef CONFIG_STACK_GROWSUP + return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1; +#else + return (unsigned long *)(task_thread_info(p) + 1); +#endif +} + +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? + task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) + +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + +extern void thread_stack_cache_init(void); + +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else + n++; +# endif + } while (!*n); + +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else + return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif +} +#endif +extern void set_task_stack_end_magic(struct task_struct *tsk); + +#ifndef __HAVE_ARCH_KSTACK_END +static inline int kstack_end(void *addr) +{ + /* Reliable end of stack detection: + * Some APM bios versions misalign the stack + */ + return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); +} +#endif + +#endif /* _LINUX_SCHED_TASK_STACK_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h new file mode 100644 index 000000000000..7d065abc7a47 --- /dev/null +++ b/include/linux/sched/topology.h @@ -0,0 +1,226 @@ +#ifndef _LINUX_SCHED_TOPOLOGY_H +#define _LINUX_SCHED_TOPOLOGY_H + +#include <linux/topology.h> + +#include <linux/sched/idle.h> + +/* + * sched-domains (multiprocessor balancing) declarations: + */ +#ifdef CONFIG_SMP + +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x4000 /* cross-node balancing */ + +/* + * Increase resolution of cpu_capacity calculations + */ +#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +#ifdef CONFIG_SCHED_SMT +static inline int cpu_smt_flags(void) +{ + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_SCHED_MC +static inline int cpu_core_flags(void) +{ + return SD_SHARE_PKG_RESOURCES; +} +#endif + +#ifdef CONFIG_NUMA +static inline int cpu_numa_flags(void) +{ + return SD_NUMA; +} +#endif + +extern int arch_asym_cpu_priority(int cpu); + +struct sched_domain_attr { + int relax_domain_level; +}; + +#define SD_ATTR_INIT (struct sched_domain_attr) { \ + .relax_domain_level = -1, \ +} + +extern int sched_domain_level_max; + +struct sched_group; + +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + +struct sched_domain { + /* These fields must be setup */ + struct sched_domain *parent; /* top domain must be null terminated */ + struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_group *groups; /* the balancing groups of the domain */ + unsigned long min_interval; /* Minimum balance interval ms */ + unsigned long max_interval; /* Maximum balance interval ms */ + unsigned int busy_factor; /* less balancing by factor if busy */ + unsigned int imbalance_pct; /* No balance until over watermark */ + unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int busy_idx; + unsigned int idle_idx; + unsigned int newidle_idx; + unsigned int wake_idx; + unsigned int forkexec_idx; + unsigned int smt_gain; + + int nohz_idle; /* NOHZ IDLE status */ + int flags; /* See SD_* */ + int level; + + /* Runtime fields. */ + unsigned long last_balance; /* init to jiffies. units in jiffies */ + unsigned int balance_interval; /* initialise to 1. units in ms. 
*/ + unsigned int nr_balance_failed; /* initialise to 0 */ + + /* idle_balance() stats */ + u64 max_newidle_lb_cost; + unsigned long next_decay_max_lb_cost; + + u64 avg_scan_cost; /* select_idle_sibling */ + +#ifdef CONFIG_SCHEDSTATS + /* load_balance() stats */ + unsigned int lb_count[CPU_MAX_IDLE_TYPES]; + unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; + + /* Active load balancing */ + unsigned int alb_count; + unsigned int alb_failed; + unsigned int alb_pushed; + + /* SD_BALANCE_EXEC stats */ + unsigned int sbe_count; + unsigned int sbe_balanced; + unsigned int sbe_pushed; + + /* SD_BALANCE_FORK stats */ + unsigned int sbf_count; + unsigned int sbf_balanced; + unsigned int sbf_pushed; + + /* try_to_wake_up() stats */ + unsigned int ttwu_wake_remote; + unsigned int ttwu_move_affine; + unsigned int ttwu_move_balance; +#endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif + union { + void *private; /* used during construction */ + struct rcu_head rcu; /* used during destruction */ + }; + struct sched_domain_shared *shared; + + unsigned int span_weight; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + */ + unsigned long span[0]; +}; + +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + +/* Allocate an array of sched domains, for partition_sched_domains(). 
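[Editor's note: sched_domain_span() above exposes the domain's cpumask. A minimal sketch of walking it; the counting helper is hypothetical.]

/* Hypothetical: count online CPUs covered by @sd. */
static int domain_online_cpus(struct sched_domain *sd)
{
	int cpu, n = 0;

	for_each_cpu(cpu, sched_domain_span(sd))
		if (cpu_online(cpu))
			n++;
	return n;
}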
*/ +cpumask_var_t *alloc_sched_domains(unsigned int ndoms); +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); + +bool cpus_share_cache(int this_cpu, int that_cpu); + +typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +typedef int (*sched_domain_flags_f)(void); + +#define SDTL_OVERLAP 0x01 + +struct sd_data { + struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; + struct sched_group **__percpu sg; + struct sched_group_capacity **__percpu sgc; +}; + +struct sched_domain_topology_level { + sched_domain_mask_f mask; + sched_domain_flags_f sd_flags; + int flags; + int numa_level; + struct sd_data data; +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif +}; + +extern void set_sched_topology(struct sched_domain_topology_level *tl); + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(type) .name = #type +#else +# define SD_INIT_NAME(type) +#endif + +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline bool cpus_share_cache(int this_cpu, int that_cpu) +{ + return true; +} + +#endif /* !CONFIG_SMP */ + +static inline int task_node(const struct task_struct *p) +{ + return cpu_to_node(task_cpu(p)); +} + +#endif /* _LINUX_SCHED_TOPOLOGY_H */ diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h new file mode 100644 index 000000000000..5d5415e129d4 --- /dev/null +++ b/include/linux/sched/user.h @@ -0,0 +1,61 @@ +#ifndef _LINUX_SCHED_USER_H +#define _LINUX_SCHED_USER_H + +#include <linux/uidgid.h> +#include <linux/atomic.h> + +struct key; + +/* + * Some day this will be a full-fledged user tracking system.. + */ +struct user_struct { + atomic_t __count; /* reference count */ + atomic_t processes; /* How many processes does this user have? */ + atomic_t sigpending; /* How many pending signals does this user have? */ +#ifdef CONFIG_FANOTIFY + atomic_t fanotify_listeners; +#endif +#ifdef CONFIG_EPOLL + atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ +#endif +#ifdef CONFIG_POSIX_MQUEUE + /* protected by mq_lock */ + unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ +#endif + unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ + atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ + +#ifdef CONFIG_KEYS + struct key *uid_keyring; /* UID specific keyring */ + struct key *session_keyring; /* UID's default session keyring */ +#endif + + /* Hash table maintenance information */ + struct hlist_node uidhash_node; + kuid_t uid; + +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) + atomic_long_t locked_vm; +#endif +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; +#define INIT_USER (&root_user) + + +/* per-UID process charging. 
*/ +extern struct user_struct * alloc_uid(kuid_t); +static inline struct user_struct *get_uid(struct user_struct *u) +{ + atomic_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); + +#endif /* _LINUX_SCHED_USER_H */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h new file mode 100644 index 000000000000..d03d8a9047dc --- /dev/null +++ b/include/linux/sched/wake_q.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_SCHED_WAKE_Q_H +#define _LINUX_SCHED_WAKE_Q_H + +/* + * Wake-queues are lists of tasks with a pending wakeup, whose + * callers have already marked the task as woken internally, + * and can thus carry on. A common use case is being able to + * do the wakeups once the corresponding user lock has been + * released. + * + * We hold a reference to each task in the list across the wakeup, + * thus guaranteeing that the memory is still valid by the time + * the actual wakeups are performed in wake_up_q(). + * + * One per task suffices, because there's never a need for a task to be + * in two wake queues simultaneously; it is forbidden to abandon a task + * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is + * already in a wake queue, the wakeup will happen soon and the second + * waker can just skip it. + * + * The DEFINE_WAKE_Q macro declares and initializes the list head. + * wake_up_q() does NOT reinitialize the list; it's expected to be + * called near the end of a function. Otherwise, the list can be + * re-initialized for later re-use by wake_q_init(). + * + * Note that this can cause spurious wakeups. schedule() callers + * must ensure the call is done inside a loop, confirming that the + * wakeup condition has in fact occurred. + */ + +#include <linux/sched.h> + +struct wake_q_head { + struct wake_q_node *first; + struct wake_q_node **lastp; +}; + +#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + +static inline void wake_q_init(struct wake_q_head *head) +{ + head->first = WAKE_Q_TAIL; + head->lastp = &head->first; +} + +extern void wake_q_add(struct wake_q_head *head, + struct task_struct *task); +extern void wake_up_q(struct wake_q_head *head); + +#endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h new file mode 100644 index 000000000000..a28156a0d34a --- /dev/null +++ b/include/linux/sched/xacct.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_SCHED_XACCT_H +#define _LINUX_SCHED_XACCT_H + +/* + * Extended task accounting methods: + */ + +#include <linux/sched.h> + +#ifdef CONFIG_TASK_XACCT +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.rchar += amt; +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ + tsk->ioac.wchar += amt; +} + +static inline void inc_syscr(struct task_struct *tsk) +{ + tsk->ioac.syscr++; +} + +static inline void inc_syscw(struct task_struct *tsk) +{ + tsk->ioac.syscw++; +} +#else +static inline void add_rchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void add_wchar(struct task_struct *tsk, ssize_t amt) +{ +} + +static inline void inc_syscr(struct task_struct *tsk) +{ +} + +static inline void inc_syscw(struct task_struct *tsk) +{ +} +#endif + +#endif /* _LINUX_SCHED_XACCT_H */ diff --git a/include/linux/security.h b/include/linux/security.h index d3868f2ebada..96899fad7016 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -140,8 +140,7 @@ struct request_sock; /* bprm->unsafe
reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 -#define LSM_UNSAFE_PTRACE_CAP 4 -#define LSM_UNSAFE_NO_NEW_PRIVS 8 +#define LSM_UNSAFE_NO_NEW_PRIVS 4 #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index deee23d012e7..04b124fca51e 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -27,6 +27,7 @@ typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send); #ifdef CONFIG_BLK_SED_OPAL +void free_opal_dev(struct opal_dev *dev); bool opal_unlock_from_suspend(struct opal_dev *dev); struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv); int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr); @@ -51,6 +52,10 @@ static inline bool is_sed_ioctl(unsigned int cmd) return false; } #else +static inline void free_opal_dev(struct opal_dev *dev) +{ +} + static inline bool is_sed_ioctl(unsigned int cmd) { return false; diff --git a/include/linux/sem.h b/include/linux/sem.h index d0efd6e6c20a..4fc222f8755d 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -21,7 +21,7 @@ struct sem_array { struct list_head list_id; /* undo requests on this array */ int sem_nsems; /* no. of semaphores in array */ int complex_count; /* pending complex operations */ - bool complex_mode; /* no parallel simple ops */ + unsigned int use_global_lock;/* >0: global lock required */ }; #ifdef CONFIG_SYSVIPC diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index ff078e7043b6..a7d6bd2a918f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); +#ifdef CONFIG_SHMEM extern bool shmem_mapping(struct address_space *mapping); +#else +static inline bool shmem_mapping(struct address_space *mapping) +{ + return false; +} +#endif /* CONFIG_SHMEM */ extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -124,4 +131,15 @@ static inline bool shmem_huge_enabled(struct vm_area_struct *vma) } #endif +#ifdef CONFIG_SHMEM +extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); +#else +#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ + src_addr, pagep) ({ BUG(); 0; }) +#endif + #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 5308304993be..94ad6eea9550 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -1,32 +1,13 @@ #ifndef _LINUX_SIGNAL_H #define _LINUX_SIGNAL_H -#include <linux/list.h> #include <linux/bug.h> -#include <uapi/linux/signal.h> +#include <linux/signal_types.h> struct task_struct; /* for sysctl */ extern int print_fatal_signals; -/* - * Real Time signals may be queued. - */ - -struct sigqueue { - struct list_head list; - int flags; - siginfo_t info; - struct user_struct *user; -}; - -/* flags values. 
*/ -#define SIGQUEUE_PREALLOC 1 - -struct sigpending { - struct list_head list; - sigset_t signal; -}; #ifndef HAVE_ARCH_COPY_SIGINFO @@ -272,42 +253,6 @@ extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -struct sigaction { -#ifndef __ARCH_HAS_IRIX_SIGACTION - __sighandler_t sa_handler; - unsigned long sa_flags; -#else - unsigned int sa_flags; - __sighandler_t sa_handler; -#endif -#ifdef __ARCH_HAS_SA_RESTORER - __sigrestore_t sa_restorer; -#endif - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -#ifdef __ARCH_HAS_KA_RESTORER - __sigrestore_t ka_restorer; -#endif -}; - -#ifdef CONFIG_OLD_SIGACTION -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; -#endif - -struct ksignal { - struct k_sigaction ka; - siginfo_t info; - int sig; -}; - extern int get_signal(struct ksignal *ksig); extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h new file mode 100644 index 000000000000..16d862a3d8f3 --- /dev/null +++ b/include/linux/signal_types.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_SIGNAL_TYPES_H +#define _LINUX_SIGNAL_TYPES_H + +/* + * Basic signal handling related data type definitions: + */ + +#include <linux/list.h> +#include <uapi/linux/signal.h> + +/* + * Real Time signals may be queued. + */ + +struct sigqueue { + struct list_head list; + int flags; + siginfo_t info; + struct user_struct *user; +}; + +/* flags values. */ +#define SIGQUEUE_PREALLOC 1 + +struct sigpending { + struct list_head list; + sigset_t signal; +}; + +struct sigaction { +#ifndef __ARCH_HAS_IRIX_SIGACTION + __sighandler_t sa_handler; + unsigned long sa_flags; +#else + unsigned int sa_flags; + __sighandler_t sa_handler; +#endif +#ifdef __ARCH_HAS_SA_RESTORER + __sigrestore_t sa_restorer; +#endif + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +#ifdef __ARCH_HAS_KA_RESTORER + __sigrestore_t ka_restorer; +#endif +}; + +#ifdef CONFIG_OLD_SIGACTION +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + __sigrestore_t sa_restorer; +}; +#endif + +struct ksignal { + struct k_sigaction ka; + siginfo_t info; + int sig; +}; + +#endif /* _LINUX_SIGNAL_TYPES_H */ diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index eadbe227c256..4985048640a7 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,7 +8,7 @@ #define _LINUX_SIGNALFD_H #include <uapi/linux/signalfd.h> - +#include <linux/sched/signal.h> #ifdef CONFIG_SIGNALFD diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 69ccd2636911..c776abd86937 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -34,6 +34,7 @@ #include <linux/dma-mapping.h> #include <linux/netdev_features.h> #include <linux/sched.h> +#include <linux/sched/clock.h> #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> diff --git a/include/linux/slab.h b/include/linux/slab.h index 4c5363566815..3c37a8c51921 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -545,22 +545,49 @@ struct memcg_cache_array { * array to be accessed without taking any locks, on relocation we free the old * version only after a grace period. 
* - * Child caches will hold extra metadata needed for its operation. Fields are: + * Root and child caches hold different metadata. * - * @memcg: pointer to the memcg this cache belongs to - * @root_cache: pointer to the global, root cache, this cache was derived from + * @root_cache: Common to root and child caches. NULL for root, pointer to + * the root cache for children. * - * Both root and child caches of the same kind are linked into a list chained - * through @list. + * + * The following fields are specific to root caches. + * + * @memcg_caches: kmemcg ID indexed table of child caches. This table is + * used to index child caches during allocation and cleared + * early during shutdown. + * + * @root_caches_node: List node for slab_root_caches list. + * + * @children: List of all child caches. While the child caches are also + * reachable through @memcg_caches, a child cache remains on + * this list until it is actually destroyed. + * + * The following fields are specific to child caches. + * + * @memcg: Pointer to the memcg this cache belongs to. + * + * @children_node: List node for @root_cache->children list. + * + * @kmem_caches_node: List node for @memcg->kmem_caches list. */ struct memcg_cache_params { - bool is_root_cache; - struct list_head list; + struct kmem_cache *root_cache; union { - struct memcg_cache_array __rcu *memcg_caches; + struct { + struct memcg_cache_array __rcu *memcg_caches; + struct list_head __root_caches_node; + struct list_head children; + }; struct { struct mem_cgroup *memcg; - struct kmem_cache *root_cache; + struct list_head children_node; + struct list_head kmem_caches_node; + + void (*deact_fn)(struct kmem_cache *); + union { + struct rcu_head deact_rcu_head; + struct work_struct deact_work; + }; }; }; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 75f56c2ef2d4..07ef550c6627 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -113,9 +113,9 @@ struct kmem_cache { #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS -void sysfs_slab_remove(struct kmem_cache *); +void sysfs_slab_release(struct kmem_cache *); #else -static inline void sysfs_slab_remove(struct kmem_cache *s) +static inline void sysfs_slab_release(struct kmem_cache *s) { } #endif diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h new file mode 100644 index 000000000000..f423001db3a9 --- /dev/null +++ b/include/linux/soc/qcom/mdt_loader.h @@ -0,0 +1,18 @@ +#ifndef __QCOM_MDT_LOADER_H__ +#define __QCOM_MDT_LOADER_H__ + +#include <linux/types.h> + +#define QCOM_MDT_TYPE_MASK (7 << 24) +#define QCOM_MDT_TYPE_HASH (2 << 24) +#define QCOM_MDT_RELOCATABLE BIT(27) + +struct device; +struct firmware; + +ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size); + +#endif diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index d30186e2b609..49df0a01a2cc 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -7,7 +7,13 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. -*/ + * + * + * Notice: + * This is not a list of all Exynos Power Management Unit SFRs.
+ * There are too many of them, not mentioning subtle differences + * between SoCs. For now, put here only the used registers. + */ #ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H #define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__ @@ -38,7 +44,6 @@ #define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) #define EXYNOS_WAKEUP_FROM_LOWPWR (1 << 28) #define EXYNOS_SWRESET 0x0400 -#define EXYNOS5440_SWRESET 0x00C4 #define S5P_WAKEUP_STAT 0x0600 #define S5P_EINT_WAKEUP_MASK 0x0604 @@ -136,12 +141,6 @@ #define EXYNOS_COMMON_OPTION(_nr) \ (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) -#define EXYNOS_CORE_LOCAL_PWR_EN 0x3 - -#define EXYNOS_ARM_COMMON_STATUS 0x2504 -#define EXYNOS_COMMON_OPTION(_nr) \ - (EXYNOS_COMMON_CONFIGURATION(_nr) + 0x8) - #define EXYNOS_ARM_L2_CONFIGURATION 0x2600 #define EXYNOS_L2_CONFIGURATION(_nr) \ (EXYNOS_ARM_L2_CONFIGURATION + ((_nr) * 0x80)) @@ -149,17 +148,8 @@ (EXYNOS_L2_CONFIGURATION(_nr) + 0x4) #define EXYNOS_L2_OPTION(_nr) \ (EXYNOS_L2_CONFIGURATION(_nr) + 0x8) -#define EXYNOS_L2_COMMON_PWR_EN 0x3 -#define EXYNOS_ARM_CORE_X_STATUS_OFFSET 0x4 - -#define EXYNOS5_APLL_SYSCLK_CONFIGURATION 0x2A00 -#define EXYNOS5_APLL_SYSCLK_STATUS 0x2A04 - -#define EXYNOS5_ARM_L2_OPTION 0x2608 -#define EXYNOS5_USE_RETENTION BIT(4) - -#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3) +#define EXYNOS_L2_USE_RETENTION BIT(4) #define S5P_PAD_RET_MAUDIO_OPTION 0x3028 #define S5P_PAD_RET_MMC2_OPTION 0x30c8 @@ -411,7 +401,6 @@ #define EXYNOS5_SATA_MEM_SYS_PWR_REG 0x11FC #define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 #define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG 0x1204 -#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG 0x1208 #define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 #define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG 0x1224 #define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 @@ -485,7 +474,6 @@ #define EXYNOS5420_SWRESET_KFC_SEL 0x3 /* Only for EXYNOS5420 */ -#define EXYNOS5420_ISP_ARM_OPTION 0x2488 #define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3) #define EXYNOS5420_LPI_MASK 0x0004 @@ -494,9 +482,6 @@ #define EXYNOS5420_ATB_KFC BIT(13) #define EXYNOS5420_ATB_ISP_ARM BIT(19) #define EXYNOS5420_EMULATION BIT(31) -#define ATB_ISP_ARM BIT(12) -#define ATB_KFC BIT(13) -#define ATB_NOC BIT(14) #define EXYNOS5420_ARM_INTR_SPREAD_ENABLE 0x0100 #define EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI 0x0104 @@ -510,11 +495,6 @@ #define EXYNOS5420_KFC_CORE_RESET(_nr) \ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) -#define EXYNOS5420_BB_CON1 0x0784 -#define EXYNOS5420_BB_SEL_EN BIT(31) -#define EXYNOS5420_BB_PMOS_EN BIT(7) -#define EXYNOS5420_BB_1300X 0XF - #define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024 #define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028 @@ -546,15 +526,6 @@ #define EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG 0x1178 #define EXYNOS5420_INTRAM_MEM_SYS_PWR_REG 0x11B8 #define EXYNOS5420_INTROM_MEM_SYS_PWR_REG 0x11BC -#define EXYNOS5420_ONENANDXL_MEM_SYS_PWR 0x11C0 -#define EXYNOS5420_USBDEV_MEM_SYS_PWR 0x11CC -#define EXYNOS5420_USBDEV1_MEM_SYS_PWR 0x11D0 -#define EXYNOS5420_SDMMC_MEM_SYS_PWR 0x11D4 -#define EXYNOS5420_CSSYS_MEM_SYS_PWR 0x11D8 -#define EXYNOS5420_SECSS_MEM_SYS_PWR 0x11DC -#define EXYNOS5420_ROTATOR_MEM_SYS_PWR 0x11E0 -#define EXYNOS5420_INTRAM_MEM_SYS_PWR 0x11E4 -#define EXYNOS5420_INTROM_MEM_SYS_PWR 0x11E8 #define EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG 0x1208 #define EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG 0x1210 #define EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG 0x1214 @@ -605,13 +576,7 @@ #define 
EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG 0x159C #define EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG 0x15A0 #define EXYNOS5420_SFR_AXI_CGDIS1 0x15E4 -#define EXYNOS_ARM_CORE2_CONFIGURATION 0x2100 -#define EXYNOS5420_ARM_CORE2_OPTION 0x2108 -#define EXYNOS_ARM_CORE3_CONFIGURATION 0x2180 -#define EXYNOS5420_ARM_CORE3_OPTION 0x2188 -#define EXYNOS5420_ARM_COMMON_STATUS 0x2504 #define EXYNOS5420_ARM_COMMON_OPTION 0x2508 -#define EXYNOS5420_KFC_COMMON_STATUS 0x2584 #define EXYNOS5420_KFC_COMMON_OPTION 0x2588 #define EXYNOS5420_LOGIC_RESET_DURATION3 0x2D1C @@ -626,33 +591,9 @@ #define EXYNOS_PAD_RET_DRAM_OPTION 0x3008 #define EXYNOS_PAD_RET_MAUDIO_OPTION 0x3028 #define EXYNOS_PAD_RET_JTAG_OPTION 0x3048 -#define EXYNOS_PAD_RET_GPIO_OPTION 0x3108 -#define EXYNOS_PAD_RET_UART_OPTION 0x3128 -#define EXYNOS_PAD_RET_MMCA_OPTION 0x3148 -#define EXYNOS_PAD_RET_MMCB_OPTION 0x3168 #define EXYNOS_PAD_RET_EBIA_OPTION 0x3188 #define EXYNOS_PAD_RET_EBIB_OPTION 0x31A8 -#define EXYNOS_PS_HOLD_CONTROL 0x330C - -/* For SYS_PWR_REG */ -#define EXYNOS_SYS_PWR_CFG BIT(0) - -#define EXYNOS5420_MFC_CONFIGURATION 0x4060 -#define EXYNOS5420_MFC_STATUS 0x4064 -#define EXYNOS5420_MFC_OPTION 0x4068 -#define EXYNOS5420_G3D_CONFIGURATION 0x4080 -#define EXYNOS5420_G3D_STATUS 0x4084 -#define EXYNOS5420_G3D_OPTION 0x4088 -#define EXYNOS5420_DISP0_CONFIGURATION 0x40A0 -#define EXYNOS5420_DISP0_STATUS 0x40A4 -#define EXYNOS5420_DISP0_OPTION 0x40A8 -#define EXYNOS5420_DISP1_CONFIGURATION 0x40C0 -#define EXYNOS5420_DISP1_STATUS 0x40C4 -#define EXYNOS5420_DISP1_OPTION 0x40C8 -#define EXYNOS5420_MAU_CONFIGURATION 0x40E0 -#define EXYNOS5420_MAU_STATUS 0x40E4 -#define EXYNOS5420_MAU_OPTION 0x40E8 #define EXYNOS5420_FSYS2_OPTION 0x4168 #define EXYNOS5420_PSGEN_OPTION 0x4188 @@ -690,4 +631,20 @@ | EXYNOS5420_KFC_USE_STANDBY_WFI2 \ | EXYNOS5420_KFC_USE_STANDBY_WFI3) +/* For EXYNOS5433 */ +#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) +#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) +#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108) +#define EXYNOS5433_PAD_RETENTION_UART_OPTION (0x3128) +#define EXYNOS5433_PAD_RETENTION_MMC0_OPTION (0x3148) +#define EXYNOS5433_PAD_RETENTION_MMC1_OPTION (0x3168) +#define EXYNOS5433_PAD_RETENTION_EBIA_OPTION (0x3188) +#define EXYNOS5433_PAD_RETENTION_EBIB_OPTION (0x31A8) +#define EXYNOS5433_PAD_RETENTION_SPI_OPTION (0x31C8) +#define EXYNOS5433_PAD_RETENTION_MIF_OPTION (0x31E8) +#define EXYNOS5433_PAD_RETENTION_USBXTI_OPTION (0x3228) +#define EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION (0x3248) +#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) +#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) + #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */ diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h index 3f22932e67a4..f4199e758f97 100644 --- a/include/linux/spi/flash.h +++ b/include/linux/spi/flash.h @@ -7,7 +7,7 @@ struct mtd_partition; * struct flash_platform_data: board-specific flash data * @name: optional flash device name (eg, as used with mtdparts=) * @parts: optional array of mtd_partitions for static partitioning - * @nr_parts: number of mtd_partitions for static partitoning + * @nr_parts: number of mtd_partitions for static partitioning * @type: optional flash device type (e.g. 
m25p80 vs m25p64), for use * with chips that can't be queried for JEDEC or other IDs * diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h deleted file mode 100644 index 563b3b1799a8..000000000000 --- a/include/linux/spi/tsc2005.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * This file is part of TSC2005 touchscreen driver - * - * Copyright (C) 2009-2010 Nokia Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef _LINUX_SPI_TSC2005_H -#define _LINUX_SPI_TSC2005_H - -#include <linux/types.h> - -struct tsc2005_platform_data { - int ts_pressure_max; - int ts_pressure_fudge; - int ts_x_max; - int ts_x_fudge; - int ts_y_max; - int ts_y_fudge; - int ts_x_plate_ohm; - unsigned int esd_timeout_ms; - void (*set_reset)(bool enable); -}; - -#endif diff --git a/include/linux/stat.h b/include/linux/stat.h index 075cb0c7eb2a..c76e524fb34b 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -18,20 +18,32 @@ #include <linux/time.h> #include <linux/uidgid.h> +#define KSTAT_QUERY_FLAGS (AT_STATX_SYNC_TYPE) + struct kstat { - u64 ino; - dev_t dev; + u32 result_mask; /* What fields the user got */ umode_t mode; unsigned int nlink; + uint32_t blksize; /* Preferred I/O size */ + u64 attributes; +#define KSTAT_ATTR_FS_IOC_FLAGS \ + (STATX_ATTR_COMPRESSED | \ + STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND | \ + STATX_ATTR_NODUMP | \ + STATX_ATTR_ENCRYPTED \ + )/* Attrs corresponding to FS_*_FL flags */ + u64 ino; + dev_t dev; + dev_t rdev; kuid_t uid; kgid_t gid; - dev_t rdev; loff_t size; - struct timespec atime; + struct timespec atime; struct timespec mtime; struct timespec ctime; - unsigned long blksize; - unsigned long long blocks; + struct timespec btime; /* File creation time */ + u64 blocks; }; #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index b1bc62ba20a2..8fd3504946ad 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -32,6 +32,7 @@ */ #define UNX_MAXNODENAME __NEW_UTS_LEN #define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME)) +#define UNX_NGROUPS 16 struct rpcsec_gss_info; @@ -63,9 +64,6 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) - unsigned long cr_magic; /* 0x0f4aa4f0 */ -#endif unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ atomic_t cr_count; /* ref count */ @@ -79,8 +77,6 @@ struct rpc_cred { #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 -#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0 - /* rpc_auth au_flags */ #define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 8a511c0985aa..270bad0e1bed 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -63,15 +63,6 @@ struct cache_head { #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ -struct cache_detail_procfs { - struct proc_dir_entry *proc_ent; - struct proc_dir_entry 
*flush_ent, *channel_ent, *content_ent; -}; - -struct cache_detail_pipefs { - struct dentry *dir; -}; - struct cache_detail { struct module * owner; int hash_size; @@ -123,9 +114,9 @@ struct cache_detail { time_t last_warn; /* when we last warned about no readers */ union { - struct cache_detail_procfs procfs; - struct cache_detail_pipefs pipefs; - } u; + struct proc_dir_entry *procfs; + struct dentry *pipefs; + }; struct net *net; }; @@ -204,8 +195,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) kref_put(&h->ref, cd->cache_put); } -static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) +static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { + if (!test_bit(CACHE_VALID, &h->flags)) + return false; + return (h->expiry_time < seconds_since_boot()) || (detail->flush_time >= h->last_refresh); } @@ -227,6 +221,7 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); +extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); /* Must store cache_detail in seq_file->private if using next three functions */ extern void *cache_seq_start(struct seq_file *file, loff_t *pos); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 333ad11b3dd9..6095ecba0dde 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -182,7 +182,6 @@ int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); -unsigned long rpc_get_timeout(struct rpc_clnt *clnt); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); @@ -202,8 +201,9 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *, struct rpc_xprt *, void *), void *data); -void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, - unsigned long timeo); +void rpc_set_connect_timeout(struct rpc_clnt *clnt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 59a7889e15db..8da0f37f3bdc 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -20,33 +20,55 @@ extern unsigned int nfsd_debug; extern unsigned int nlm_debug; #endif -#define dprintk(args...) dfprintk(FACILITY, ## args) -#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) +#define dprintk(fmt, ...) \ + dfprintk(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_cont(fmt, ...) \ + dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu(fmt, ...) \ + dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__) +#define dprintk_rcu_cont(fmt, ...) \ + dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__) #undef ifdebug #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) -# define dfprintk(fac, args...) \ - do { \ - ifdebug(fac) \ - printk(KERN_DEFAULT args); \ - } while (0) - -# define dfprintk_rcu(fac, args...) \ - do { \ - ifdebug(fac) { \ - rcu_read_lock(); \ - printk(KERN_DEFAULT args); \ - rcu_read_unlock(); \ - } \ - } while (0) +# define dfprintk(fac, fmt, ...) 
\ +do { \ + ifdebug(fac) \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ +} while (0) + +# define dfprintk_rcu(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) + +# define dfprintk_rcu_cont(fac, fmt, ...) \ +do { \ + ifdebug(fac) { \ + rcu_read_lock(); \ + printk(KERN_CONT fmt, ##__VA_ARGS__); \ + rcu_read_unlock(); \ + } \ +} while (0) # define RPC_IFDEBUG(x) x #else # define ifdebug(fac) if (0) -# define dfprintk(fac, args...) do {} while (0) -# define dfprintk_rcu(fac, args...) do {} while (0) +# define dfprintk(fac, fmt, ...) do {} while (0) +# define dfprintk_cont(fac, fmt, ...) do {} while (0) +# define dfprintk_rcu(fac, fmt, ...) do {} while (0) # define RPC_IFDEBUG(x) #endif diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index cfda6adcf33c..245fc59b7324 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -110,6 +110,15 @@ struct rpcrdma_msg { }; /* + * XDR sizes, in quads + */ +enum { + rpcrdma_fixed_maxsz = 4, + rpcrdma_segment_maxsz = 4, + rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, +}; + +/* * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks */ #define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 7321ae933867..e770abeed32d 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -400,10 +400,14 @@ struct svc_version { struct svc_procedure * vs_proc; /* per-procedure info */ u32 vs_xdrsize; /* xdrsize needed for this version */ - unsigned int vs_hidden : 1, /* Don't register with portmapper. - * Only used for nfsacl so far. */ - vs_rpcb_optnl:1;/* Don't care the result of register. - * Only used for nfsv4. */ + /* Don't register with rpcbind */ + bool vs_hidden; + + /* Don't care if the rpcbind registration fails */ + bool vs_rpcb_optnl; + + /* Need xprt with congestion control */ + bool vs_need_cong_ctrl; /* Override dispatch function (e.g. when caching replies). * A return value of 0 means drop the request. diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 757fb963696c..b105f73e3ca2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod; * completes. 
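The new _cont variants pair a normal dprintk() with KERN_CONT continuations, so one logical debug line can be assembled in pieces under the same facility gating. A minimal sketch (assumes a SUNRPC source file; RPCDBG_FACILITY must already be defined, and the message text is invented):

#include <linux/sunrpc/debug.h>

#define RPCDBG_FACILITY	RPCDBG_XPRT

static void example_log_state(bool connected)
{
	/* opens the line at KERN_DEFAULT, gated on the XPRT debug flag */
	dprintk("RPC:       transport is");
	/* appended to the same line via KERN_CONT, same gating */
	dprintk_cont(" %s\n", connected ? "up" : "down");
}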
*/ struct svc_rdma_op_ctxt { - struct list_head free; + struct list_head list; struct svc_rdma_op_ctxt *read_hdr; struct svc_rdma_fastreg_mr *frmr; int hdr_count; @@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt { struct ib_cqe cqe; struct ib_cqe reg_cqe; struct ib_cqe inv_cqe; - struct list_head dto_q; u32 byte_len; u32 position; struct svcxprt_rdma *xprt; @@ -141,7 +140,8 @@ struct svcxprt_rdma { atomic_t sc_sq_avail; /* SQEs ready to be consumed */ unsigned int sc_sq_depth; /* Depth of SQ */ unsigned int sc_rq_depth; /* Depth of RQ */ - u32 sc_max_requests; /* Forward credits */ + __be32 sc_fc_credits; /* Forward credits */ + u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ @@ -171,7 +171,6 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; - struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ struct list_head sc_read_complete_q; struct work_struct sc_work; }; @@ -214,11 +213,7 @@ extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); -extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, - struct rpcrdma_msg *, - struct rpcrdma_msg *, - enum rpcrdma_proc); -extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *); +extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp); /* svc_rdma_recvfrom.c */ extern int svc_rdma_recvfrom(struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 7440290f64ac..ddb7f94a9d06 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -67,6 +67,7 @@ struct svc_xprt { #define XPT_CACHE_AUTH 11 /* cache auth info */ #define XPT_LOCAL 12 /* connection from loopback interface */ #define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */ +#define XPT_CONG_CTRL 14 /* has congestion control */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h index d222f47550af..11a7536c0fd2 100644 --- a/include/linux/sunrpc/types.h +++ b/include/linux/sunrpc/types.h @@ -10,6 +10,7 @@ #define _LINUX_SUNRPC_TYPES_H_ #include <linux/timer.h> +#include <linux/sched/signal.h> #include <linux/workqueue.h> #include <linux/sunrpc/debug.h> #include <linux/list.h> diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 56c48c884a24..054c8cde18f3 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -242,6 +242,185 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); +ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, + size_t maxlen, gfp_t gfp_flags); +/** + * xdr_align_size - Calculate padded size of an object + * @n: Size of an object being XDR encoded (in bytes) + * + * Return value: + * Size (in bytes) of the object including xdr padding + */ +static inline size_t +xdr_align_size(size_t n) +{ + const size_t mask = sizeof(__u32) - 1; + + return (n + mask) & ~mask; +} + +/** + * 
xdr_stream_encode_u32 - Encode a 32-bit integer + * @xdr: pointer to xdr_stream + * @n: integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = cpu_to_be32(n); + return len; +} + +/** + * xdr_stream_encode_u64 - Encode a 64-bit integer + * @xdr: pointer to xdr_stream + * @n: 64-bit integer to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) +{ + const size_t len = sizeof(n); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_hyper(p, n); + return len; +} + +/** + * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_fixed(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque_fixed(p, ptr, len); + return xdr_align_size(len); +} + +/** + * xdr_stream_encode_opaque - Encode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to opaque data object + * @len: size of object pointed to by @ptr + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_opaque(p, ptr, len); + return count; +} + +/** + * xdr_stream_decode_u32 - Decode a 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = be32_to_cpup(p); + return 0; +} + +/** + * xdr_stream_decode_opaque_fixed - Decode fixed length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store data + * @len: size of buffer pointed to by @ptr + * + * Return values: + * On success, returns size of object stored in @ptr + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len) +{ + __be32 *p = xdr_inline_decode(xdr, len); + + if (unlikely(!p)) + return -EBADMSG; + xdr_decode_opaque_fixed(p, ptr, len); + return len; +} + +/** + * xdr_stream_decode_opaque_inline - Decode variable length opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: location to store pointer to opaque data + * @maxlen: maximum acceptable object size + * + * Note: the pointer stored in @ptr cannot be assumed valid after the XDR + * buffer has been destroyed, or even after calling 
xdr_inline_decode() + * on @xdr. It is therefore expected that the object it points to should + * be processed immediately. + * + * Return values: + * On success, returns size of object stored in *@ptr + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the object would exceed @maxlen + */ +static inline ssize_t +xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxlen) +{ + __be32 *p; + __u32 len; + + *ptr = NULL; + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EBADMSG; + if (unlikely(len > maxlen)) + return -EMSGSIZE; + *ptr = p; + } + return len; +} #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a5da60b24d83..eab1c749e192 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -137,6 +137,9 @@ struct rpc_xprt_ops { void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); + void (*set_connect_timeout)(struct rpc_xprt *xprt, + unsigned long connect_timeout, + unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); @@ -221,6 +224,7 @@ struct rpc_xprt { struct timer_list timer; unsigned long last_used, idle_timeout, + connect_timeout, max_reconnect_timeout; /* diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index bef3fb0abb8f..c9959d7e3579 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -55,6 +55,8 @@ struct sock_xprt { size_t rcvsize, sndsize; + struct rpc_timeout tcp_timeout; + /* * Saved socket callback addresses */ @@ -81,6 +83,7 @@ struct sock_xprt { #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) +#define XPRT_SOCK_UPD_TIMEOUT (3) #endif /* __KERNEL__ */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 7f47b7098b1b..45e91dd6716d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -27,6 +27,7 @@ struct bio; #define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \ SWAP_FLAG_DISCARD_PAGES) +#define SWAP_BATCH 64 static inline int current_is_kswapd(void) { @@ -176,6 +177,12 @@ enum { * protected by swap_info_struct.lock. 
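Together these helpers replace open-coded xdr_reserve_space()/xdr_inline_decode() sequences. A matched encode/decode pair might look like this (a sketch; it assumes streams already set up via xdr_init_encode()/xdr_init_decode(), and the 64-byte cap is arbitrary):

#include <linux/string.h>
#include <linux/sunrpc/xdr.h>

static int example_encode(struct xdr_stream *xdr, const char *tag, u32 count)
{
	if (xdr_stream_encode_u32(xdr, count) < 0)
		return -EMSGSIZE;
	/* writes the length word plus the padded payload */
	if (xdr_stream_encode_opaque(xdr, tag, strlen(tag)) < 0)
		return -EMSGSIZE;
	return 0;
}

static ssize_t example_decode(struct xdr_stream *xdr, u32 *count)
{
	void *tag;

	if (xdr_stream_decode_u32(xdr, count) < 0)
		return -EBADMSG;
	/* per the note above, tag points into the receive buffer and
	 * must be consumed before the buffer is reused */
	return xdr_stream_decode_opaque_inline(xdr, &tag, 64);
}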
*/ struct swap_cluster_info { + spinlock_t lock; /* * Protect swap_cluster_info fields * and swap_info_struct->swap_map * elements that correspond to the swap * cluster */ unsigned int data:24; unsigned int flags:8; }; @@ -337,8 +344,13 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, sector_t *); /* linux/mm/swap_state.c */ -extern struct address_space swapper_spaces[]; -#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)]) +/* One swap address space for each 64M swap space */ +#define SWAP_ADDRESS_SPACE_SHIFT 14 +#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) +extern struct address_space *swapper_spaces[]; +#define swap_address_space(entry) \ + (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ + >> SWAP_ADDRESS_SPACE_SHIFT]) extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *, struct list_head *list); @@ -360,6 +372,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; extern long total_swap_pages; +extern bool has_usable_swap(void); /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { @@ -375,23 +388,31 @@ static inline long get_nr_swap_pages(void) extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); +extern int get_swap_pages(int n, swp_entry_t swp_entries[]); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free(swp_entry_t); +extern void swapcache_free_entries(swp_entry_t *entries, int n); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); +extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern bool reuse_swap_page(struct page *, int *); extern int try_to_free_swap(struct page *); struct backing_dev_info; +extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); +extern void exit_swap_address_space(unsigned int type); + +extern int get_swap_slots(int n, swp_entry_t *slots); +extern void swapcache_free_batch(swp_entry_t *entries, int n); #else /* CONFIG_SWAP */ @@ -479,6 +500,11 @@ static inline int page_swapcount(struct page *page) return 0; } +static inline int __swp_swapcount(swp_entry_t entry) +{ + return 0; +} + static inline int swp_swapcount(swp_entry_t entry) { return 0; diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h new file mode 100644 index 000000000000..6ef92d17633d --- /dev/null +++ b/include/linux/swap_slots.h @@ -0,0 +1,30 @@ +#ifndef _LINUX_SWAP_SLOTS_H +#define _LINUX_SWAP_SLOTS_H + +#include <linux/swap.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH +#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE) +#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE) + +struct swap_slots_cache { + bool lock_initialized; + struct mutex alloc_lock; /* protects
slots, nr, cur */ + swp_entry_t *slots; + int nr; + int cur; + spinlock_t free_lock; /* protects slots_ret, n_ret */ + swp_entry_t *slots_ret; + int n_ret; +}; + +void disable_swap_slots_cache_lock(void); +void reenable_swap_slots_cache_unlock(void); +int enable_swap_slots_cache(void); +int free_swap_slot(swp_entry_t entry); + +extern bool swap_slot_cache_enabled; + +#endif /* _LINUX_SWAP_SLOTS_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 91a740f6b884..980c3c9b06f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -48,6 +48,7 @@ struct stat; struct stat64; struct statfs; struct statfs64; +struct statx; struct __sysctl_args; struct sysinfo; struct timespec; @@ -902,5 +903,7 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len, unsigned long prot, int pkey); asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); asmlinkage long sys_pkey_free(int pkey); +asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, + unsigned mask, struct statx __user *buffer); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index adf4e51cf597..b7e82049fec7 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -143,6 +143,7 @@ struct ctl_table_header struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; + struct list_head inodes; /* head for proc_inode->sysctl_inodes */ }; struct ctl_dir { diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h index 58de6edf751f..e2a5daf8d14f 100644 --- a/include/linux/taskstats_kern.h +++ b/include/linux/taskstats_kern.h @@ -8,7 +8,7 @@ #define _LINUX_TASKSTATS_KERN_H #include <linux/taskstats.h> -#include <linux/sched.h> +#include <linux/sched/signal.h> #include <linux/slab.h> #ifdef CONFIG_TASKSTATS diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e275e98bdceb..dab11f97e1c6 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -194,7 +194,7 @@ struct thermal_attr { * @governor: pointer to the governor for this thermal zone * @governor_data: private pointer for governor data * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @idr: &struct idr to generate unique id for this zone's cooling + * @ida: &struct ida to generate unique id for this zone's cooling * devices * @lock: lock to protect thermal_instances list * @node: node in thermal_tz_list (in thermal_core.c) @@ -227,7 +227,7 @@ struct thermal_zone_device { struct thermal_governor *governor; void *governor_data; struct list_head thermal_instances; - struct idr idr; + struct ida ida; struct mutex lock; struct list_head node; struct delayed_work poll_queue; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index d2e804e15c3e..b598cbc7b576 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -8,6 +8,10 @@ void timekeeping_init(void); extern int timekeeping_suspended; +/* Architecture timer tick functions: */ +extern void update_process_times(int user); +extern void xtime_update(unsigned long ticks); + /* * Get and set timeofday */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 5a209b84fd9e..e6789b8757d5 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -61,6 +61,8 @@ struct timer_list { #define TIMER_ARRAYSHIFT 22 #define TIMER_ARRAYMASK 0xFFC00000 +#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) + #define __TIMER_INITIALIZER(_function, _expires, _data, 
_flags) { \ .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ @@ -210,7 +212,7 @@ struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -#include <linux/sysctl.h> +struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0f165507495c..0af63c4381b9 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -23,6 +23,10 @@ const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, const struct trace_print_flags *symbol_array); #if BITS_PER_LONG == 32 +const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim, + unsigned long long flags, + const struct trace_print_flags_u64 *flag_array); + const char *trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, const struct trace_print_flags_u64 diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index eb209d4523f5..be765234c0a2 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -5,6 +5,9 @@ #include <linux/nsproxy.h> #include <linux/ns_common.h> #include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/rwsem.h> +#include <linux/sysctl.h> #include <linux/err.h> #define UID_GID_MAP_MAX_EXTENTS 5 @@ -32,6 +35,10 @@ enum ucount_type { UCOUNT_NET_NAMESPACES, UCOUNT_MNT_NAMESPACES, UCOUNT_CGROUP_NAMESPACES, +#ifdef CONFIG_INOTIFY_USER + UCOUNT_INOTIFY_INSTANCES, + UCOUNT_INOTIFY_WATCHES, +#endif UCOUNT_COUNTS, }; diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 11b92b047a1e..0468548acebf 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -52,6 +52,28 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); } +extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); +extern void dup_userfaultfd_complete(struct list_head *); + +extern void mremap_userfaultfd_prep(struct vm_area_struct *, + struct vm_userfaultfd_ctx *); +extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, + unsigned long from, unsigned long to, + unsigned long len); + +extern void userfaultfd_remove(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, + unsigned long end); + +extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf); +extern void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf); + +extern void userfaultfd_exit(struct mm_struct *mm); + #else /* CONFIG_USERFAULTFD */ /* mm helpers */ @@ -76,6 +98,51 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) return false; } +static inline int dup_userfaultfd(struct vm_area_struct *vma, + struct list_head *l) +{ + return 0; +} + +static inline void dup_userfaultfd_complete(struct list_head *l) +{ +} + +static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma, + struct vm_userfaultfd_ctx *ctx) +{ +} + +static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, + unsigned long from, + unsigned long to, + unsigned long len) +{ +} + +static inline void userfaultfd_remove(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start, + unsigned long end) +{ +} + +static inline int 
userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct list_head *uf) +{ + return 0; +} + +static inline void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf) +{ +} + +static inline void userfaultfd_exit(struct mm_struct *mm) +{ +} + #endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 26c155bb639b..8355bab175e1 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -7,6 +7,8 @@ #include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> +struct irq_affinity; + /** * virtio_config_ops - operations for configuring a virtio device * @get: read the value of a configuration field @@ -56,6 +58,7 @@ * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. + * @get_vq_affinity: get the affinity for a virtqueue (optional). */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -68,14 +71,15 @@ struct virtio_config_ops { void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[]); + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); + const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, + int index); }; /* If driver didn't advertise the feature, it will never appear. 
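The find_vqs() signature change above ripples to every caller: a trailing struct irq_affinity pointer is added, and NULL preserves the old no-hint behaviour, as the virtio_find_single_vq() fix-up just below shows. A sketch for a two-queue driver (the array names are stand-ins for a driver's existing tables):

#include <linux/virtio_config.h>

static int example_setup_vqs(struct virtio_device *vdev,
			     struct virtqueue *vqs[2],
			     vq_callback_t *callbacks[2],
			     const char * const names[2])
{
	/* final NULL: no irq_affinity descriptor, i.e. no MSI-X spreading hint */
	return vdev->config->find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}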
*/ @@ -169,7 +173,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL); if (err < 0) return ERR_PTR(err); return vq; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 4d6ec58a8d45..6aa1b6cb5828 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -56,6 +56,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, COMPACTISOLATED, COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, KCOMPACTD_WAKE, + KCOMPACTD_MIGRATE_SCANNED, KCOMPACTD_FREE_SCANNED, #endif #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..1081db987391 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -12,7 +12,7 @@ static inline void vmacache_flush(struct task_struct *tsk) { - memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } extern void vmacache_flush_all(struct mm_struct *mm); diff --git a/include/linux/wait.h b/include/linux/wait.h index 1421132e9086..aacb1282d19a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -6,6 +6,7 @@ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> + #include <asm/current.h> #include <uapi/linux/wait.h> diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 35a4d8185b51..a786e5e8973b 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -117,6 +117,7 @@ struct watchdog_device { #define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? 
*/ #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ +#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ struct list_head deferred; }; @@ -151,6 +152,12 @@ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd) { set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); } +/* Use the following function to stop the watchdog when unregistering it */ +static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) +{ + set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a26cc437293c..bde063cefd04 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -106,9 +106,9 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ - ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) + ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) struct delayed_work { struct work_struct work; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5527d910ba3d..a3c0cbd7c888 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -46,7 +46,7 @@ enum writeback_sync_modes { */ enum wb_reason { WB_REASON_BACKGROUND, - WB_REASON_TRY_TO_FREE_PAGES, + WB_REASON_VMSCAN, WB_REASON_SYNC, WB_REASON_PERIODIC, WB_REASON_LAPTOP_TIMER, diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index e1006b391cdc..bee1404391dd 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -174,10 +174,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. - * @p_cur: The control's current value represented via an union with + * @p_cur: The control's current value represented via a union which * provides a standard way of accessing control types * through a pointer. - * @p_new: The control's new value represented via an union with provides + * @p_new: The control's new value represented via a union which provides * a standard way of accessing control types * through a pointer.
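The watchdog_stop_on_unregister() helper introduced above is meant to be called once at probe time, next to the existing reboot variant. Sketch (the examplewdt name is hypothetical):

#include <linux/watchdog.h>

static int examplewdt_register(struct watchdog_device *wdd)
{
	watchdog_stop_on_reboot(wdd);		/* existing behaviour */
	watchdog_stop_on_unregister(wdd);	/* new: quiesce HW on removal */
	return watchdog_register_device(wdd);
}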
*/ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 574ff2ae94be..6cd94e5ee113 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -12,6 +12,7 @@ #include <linux/poll.h> #include <linux/fs.h> #include <linux/mutex.h> +#include <linux/sched/signal.h> #include <linux/compiler.h> /* need __user */ #include <linux/videodev2.h> diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 27dfe85772b1..b8eb51a661e5 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -402,10 +402,10 @@ struct p9_wstat { u32 atime; u32 mtime; u64 length; - char *name; - char *uid; - char *gid; - char *muid; + const char *name; + const char *uid; + const char *gid; + const char *muid; char *extension; /* 9p2000.u extensions */ kuid_t n_uid; /* 9p2000.u extensions */ kgid_t n_gid; /* 9p2000.u extensions */ diff --git a/include/net/9p/client.h b/include/net/9p/client.h index c6b97e58cf84..b582339ccef5 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -223,16 +223,16 @@ void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); void p9_client_begin_disconnect(struct p9_client *clnt); struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, - char *uname, kuid_t n_uname, char *aname); + const char *uname, kuid_t n_uname, const char *aname); struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, - char **wnames, int clone); + const unsigned char * const *wnames, int clone); int p9_client_open(struct p9_fid *fid, int mode); -int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, +int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode, char *extension); -int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); -int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, kgid_t gid, - struct p9_qid *qid); -int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, +int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, const char *newname); +int p9_client_symlink(struct p9_fid *fid, const char *name, const char *symname, + kgid_t gid, struct p9_qid *qid); +int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags, u32 mode, kgid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); @@ -250,9 +250,9 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); -int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, +int p9_client_mknod_dotl(struct p9_fid *oldfid, const char *name, int mode, dev_t rdev, kgid_t gid, struct p9_qid *); -int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, +int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode, kgid_t gid, struct p9_qid *); int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status); int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 90708f68cc02..95ccc1eef558 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -26,6 +26,8 @@ #define __HCI_CORE_H #include <linux/leds.h> +#include <linux/rculist.h> + #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sock.h> diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 
b8d637225a07..c0452de83086 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -25,6 +25,8 @@ #define _LINUX_NET_BUSY_POLL_H #include <linux/netdevice.h> +#include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <net/ip.h> #ifdef CONFIG_NET_RX_BUSY_POLL diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c92dc03c8528..ead1aa6d003e 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1948,7 +1948,7 @@ struct cfg80211_deauth_request { * struct cfg80211_disassoc_request - Disassociation request data * * This structure provides information needed to complete IEEE 802.11 - * disassocation. + * disassociation. * * @bss: the BSS to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b9a08cd1d97d..a3bab3c5ecfb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3392,7 +3392,7 @@ enum ieee80211_reconfig_type { * since there won't be any time to beacon before the switch anyway. * @pre_channel_switch: This is an optional callback that is called * before a channel switch procedure is started (ie. when a STA - * gets a CSA or an userspace initiated channel-switch), allowing + * gets a CSA or a userspace initiated channel-switch), allowing * the driver to prepare for the channel switch. * @post_channel_switch: This is an optional callback that is called * after a channel switch procedure is completed, allowing the diff --git a/include/net/scm.h b/include/net/scm.h index 59fa93c01d2a..142ea9e7a6d0 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -3,6 +3,7 @@ #include <linux/limits.h> #include <linux/net.h> +#include <linux/cred.h> #include <linux/security.h> #include <linux/pid.h> #include <linux/nsproxy.h> diff --git a/include/rdma/ib.h b/include/rdma/ib.h index a6b93706b0fc..9b4c22a36931 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -35,6 +35,7 @@ #include <linux/types.h> #include <linux/sched.h> +#include <linux/cred.h> struct ib_addr { union { diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index e30f19bd4a41..385ec88ee9e5 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -165,4 +165,17 @@ int ib_get_cached_lmc(struct ib_device *device, u8 port_num, u8 *lmc); +/** + * ib_get_cached_port_state - Returns a cached port state table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @port_state: port_state for the specified port for that device. + * + * ib_get_cached_port_state() fetches the specified port_state table entry stored in + * the local software cache. 
+ */ +int ib_get_cached_port_state(struct ib_device *device, + u8 port_num, + enum ib_port_state *port_active); + #endif /* _IB_CACHE_H */ diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 408439fe911e..c755325f0831 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -75,6 +75,12 @@ #define IB_GRH_FLOW_SHIFT 0 #define IB_GRH_NEXT_HDR 0x1B +#define IB_AETH_CREDIT_SHIFT 24 +#define IB_AETH_CREDIT_MASK 0x1F +#define IB_AETH_CREDIT_INVAL 0x1F +#define IB_AETH_NAK_SHIFT 29 +#define IB_MSN_MASK 0xFFFFFF + struct ib_reth { __be64 vaddr; /* potentially unaligned */ __be32 rkey; diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 5ee7aab95eb8..fd0e53219f93 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -153,12 +153,12 @@ struct ib_sa_path_rec { union ib_gid sgid; __be16 dlid; __be16 slid; - int raw_traffic; + u8 raw_traffic; /* reserved */ __be32 flow_label; u8 hop_limit; u8 traffic_class; - int reversible; + u8 reversible; u8 numb_path; __be16 pkey; __be16 qos_class; @@ -220,7 +220,7 @@ struct ib_sa_mcmember_rec { u8 hop_limit; u8 scope; u8 join_state; - int proxy_join; + u8 proxy_join; }; /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index 3da0b167041b..542cd8b3414c 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -79,11 +79,15 @@ struct ib_umem_odp { struct completion notifier_completion; int dying; + struct work_struct work; }; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem); +struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, + unsigned long addr, + size_t size); void ib_umem_odp_release(struct ib_umem *umem); @@ -117,10 +121,12 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end, int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end, umem_call_back cb, void *cookie); -struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root, - u64 start, u64 last); -struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node, - u64 start, u64 last); +/* + * Find first region intersecting with address range. + * Return NULL if not found + */ +struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root, + u64 addr, u64 length); static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, unsigned long mmu_seq) @@ -153,6 +159,13 @@ static inline int ib_umem_odp_get(struct ib_ucontext *context, return -EINVAL; } +static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, + unsigned long addr, + size_t size) +{ + return ERR_PTR(-EINVAL); +} + static inline void ib_umem_odp_release(struct ib_umem *umem) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b567e4452a47..0f1813c13687 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -60,6 +60,7 @@ #include <linux/atomic.h> #include <linux/mmu_notifier.h> #include <linux/uaccess.h> +#include <linux/cgroup_rdma.h> extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -207,6 +208,7 @@ enum ib_device_cap_flags { IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), IB_DEVICE_RC_IP_CSUM = (1 << 25), + /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. 
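Typical use of the new ib_get_cached_port_state() above is a cheap liveness check that avoids a full ib_query_port() on hot paths. A sketch:

#include <rdma/ib_cache.h>

static bool example_port_active(struct ib_device *device, u8 port_num)
{
	enum ib_port_state state;

	/* a non-zero return means port_num was out of range */
	if (ib_get_cached_port_state(device, port_num, &state))
		return false;
	return state == IB_PORT_ACTIVE;
}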
*/ IB_DEVICE_RAW_IP_CSUM = (1 << 26), /* * Devices should set IB_DEVICE_CROSS_CHANNEL if they @@ -220,6 +222,7 @@ enum ib_device_cap_flags { IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), IB_DEVICE_SG_GAPS_REG = (1ULL << 32), IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), + /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), }; @@ -241,7 +244,8 @@ enum ib_atomic_cap { }; enum ib_odp_general_cap_bits { - IB_ODP_SUPPORT = 1 << 0, + IB_ODP_SUPPORT = 1 << 0, + IB_ODP_SUPPORT_IMPLICIT = 1 << 1, }; enum ib_odp_transport_cap_bits { @@ -330,6 +334,7 @@ struct ib_device_attr { uint64_t hca_core_clock; /* in KHZ */ struct ib_rss_caps rss_caps; u32 max_wq_type_rq; + u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ }; enum ib_mtu { @@ -499,6 +504,8 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 +#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 +#define RDMA_CORE_CAP_PROT_USNIC 0x02000000 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ | RDMA_CORE_CAP_IB_MAD \ @@ -522,6 +529,10 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ | RDMA_CORE_CAP_OPA_MAD) +#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) + +#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) + struct ib_port_attr { u64 subnet_prefix; enum ib_port_state state; @@ -1019,6 +1030,7 @@ enum ib_qp_create_flags { IB_QP_CREATE_SIGNATURE_EN = 1 << 6, IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, IB_QP_CREATE_SCATTER_FCS = 1 << 8, + IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, /* reserve bits 26-31 for low level drivers' internal use */ IB_QP_CREATE_RESERVED_START = 1 << 26, IB_QP_CREATE_RESERVED_END = 1 << 31, @@ -1345,6 +1357,12 @@ struct ib_fmr_attr { struct ib_umem; +struct ib_rdmacg_object { +#ifdef CONFIG_CGROUP_RDMA + struct rdma_cgroup *cg; /* owner rdma cgroup */ +#endif +}; + struct ib_ucontext { struct ib_device *device; struct list_head pd_list; @@ -1377,6 +1395,8 @@ struct ib_ucontext { struct list_head no_private_counters; int odp_mrs_count; #endif + + struct ib_rdmacg_object cg_obj; }; struct ib_uobject { @@ -1384,6 +1404,7 @@ struct ib_uobject { struct ib_ucontext *context; /* associated user context */ void *object; /* containing object */ struct list_head list; /* link to context's list */ + struct ib_rdmacg_object cg_obj; /* rdmacg object */ int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ @@ -1470,6 +1491,18 @@ struct ib_srq { } ext; }; +enum ib_raw_packet_caps { + /* Strip cvlan from incoming packet and report it in the matching work + * completion is supported. + */ + IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), + /* Scatter FCS field of an incoming packet to host memory is supported. + */ + IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), + /* Checksum offloads are supported (for both send and receive). 
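These capability bits pair with the WQ flags introduced a little further down: a consumer checks raw_packet_caps first, then requests the feature through ib_modify_wq(). A sketch of enabling cvlan stripping (assumes the ib_device_attr was already queried; error handling trimmed):

#include <rdma/ib_verbs.h>

static int example_enable_cvlan_strip(struct ib_wq *wq,
				      const struct ib_device_attr *attr)
{
	struct ib_wq_attr wq_attr = {
		.flags		= IB_WQ_FLAGS_CVLAN_STRIPPING,
		.flags_mask	= IB_WQ_FLAGS_CVLAN_STRIPPING,
	};

	if (!(attr->raw_packet_caps & IB_RAW_PACKET_CAP_CVLAN_STRIPPING))
		return -EOPNOTSUPP;
	return ib_modify_wq(wq, &wq_attr, IB_WQ_FLAGS);
}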
*/ + IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), +}; + enum ib_wq_type { IB_WQT_RQ }; @@ -1493,6 +1526,11 @@ struct ib_wq { atomic_t usecnt; }; +enum ib_wq_flags { + IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, + IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, +}; + struct ib_wq_init_attr { void *wq_context; enum ib_wq_type wq_type; @@ -1500,16 +1538,20 @@ struct ib_wq_init_attr { u32 max_sge; struct ib_cq *cq; void (*event_handler)(struct ib_event *, void *); + u32 create_flags; /* Use enum ib_wq_flags */ }; enum ib_wq_attr_mask { - IB_WQ_STATE = 1 << 0, - IB_WQ_CUR_STATE = 1 << 1, + IB_WQ_STATE = 1 << 0, + IB_WQ_CUR_STATE = 1 << 1, + IB_WQ_FLAGS = 1 << 2, }; struct ib_wq_attr { enum ib_wq_state wq_state; enum ib_wq_state curr_wq_state; + u32 flags; /* Use enum ib_wq_flags */ + u32 flags_mask; /* Use enum ib_wq_flags */ }; struct ib_rwq_ind_table { @@ -1618,6 +1660,8 @@ enum ib_flow_spec_type { IB_FLOW_SPEC_UDP = 0x41, IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, IB_FLOW_SPEC_INNER = 0x100, + /* Actions */ + IB_FLOW_SPEC_ACTION_TAG = 0x1000, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 @@ -1740,6 +1784,12 @@ struct ib_flow_spec_tunnel { struct ib_flow_tunnel_filter mask; }; +struct ib_flow_spec_action_tag { + enum ib_flow_spec_type type; + u16 size; + u32 tag_id; +}; + union ib_flow_spec { struct { u32 type; @@ -1751,6 +1801,7 @@ union ib_flow_spec { struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_action_tag flow_tag; }; struct ib_flow_attr { @@ -1789,59 +1840,17 @@ enum ib_mad_result { #define IB_DEVICE_NAME_MAX 64 +struct ib_port_cache { + struct ib_pkey_cache *pkey; + struct ib_gid_table *gid; + u8 lmc; + enum ib_port_state port_state; +}; + struct ib_cache { rwlock_t lock; struct ib_event_handler event_handler; - struct ib_pkey_cache **pkey_cache; - struct ib_gid_table **gid_cache; - u8 *lmc_cache; -}; - -struct ib_dma_mapping_ops { - int (*mapping_error)(struct ib_device *dev, - u64 dma_addr); - u64 (*map_single)(struct ib_device *dev, - void *ptr, size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - u64 (*map_page)(struct ib_device *dev, - struct page *page, unsigned long offset, - size_t size, - enum dma_data_direction direction); - void (*unmap_page)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - int (*map_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*unmap_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*sync_single_for_cpu)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void *(*alloc_coherent)(struct ib_device *dev, - size_t size, - u64 *dma_handle, - gfp_t flag); - void (*free_coherent)(struct ib_device *dev, - size_t size, void *cpu_addr, - u64 dma_handle); + struct ib_port_cache *ports; }; struct iw_cm_verbs; @@ -1854,8 +1863,6 @@ struct ib_port_immutable { }; struct ib_device { - struct 
device *dma_device; - char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -2105,7 +2112,6 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; @@ -2132,6 +2138,10 @@ struct ib_device { struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; +#ifdef CONFIG_CGROUP_RDMA + struct rdmacg_device cg_device; +#endif + /** * The following mandatory functions are used only at device * registration. Keep functions such as these at the end of this @@ -2289,6 +2299,13 @@ static inline u8 rdma_end_port(const struct ib_device *device) return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; } +static inline int rdma_is_port_valid(const struct ib_device *device, + unsigned int port) +{ + return (port >= rdma_start_port(device) && + port <= rdma_end_port(device)); +} + static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) { return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; @@ -2321,6 +2338,16 @@ static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) rdma_protocol_roce(device, port_num); } +static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; +} + +static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) +{ + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; +} + /** * rdma_cap_ib_mad - Check if the port of a device supports Infiniband * Management Datagrams. @@ -2980,9 +3007,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - if (dev->dma_ops) - return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dev->dma_device, dma_addr); + return dma_mapping_error(&dev->dev, dma_addr); } /** @@ -2996,9 +3021,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); - return dma_map_single(dev->dma_device, cpu_addr, size, direction); + return dma_map_single(&dev->dev, cpu_addr, size, direction); } /** @@ -3012,28 +3035,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_single(dev, addr, size, direction); - else - dma_unmap_single(dev->dma_device, addr, size, direction); -} - -static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, - direction, dma_attrs); -} - -static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_unmap_single_attrs(dev->dma_device, addr, size, - direction, dma_attrs); + dma_unmap_single(&dev->dev, addr, size, direction); } /** @@ -3050,9 +3052,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_page(dev, page, offset, size, 
direction); - return dma_map_page(dev->dma_device, page, offset, size, direction); + return dma_map_page(&dev->dev, page, offset, size, direction); } /** @@ -3066,10 +3066,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_page(dev, addr, size, direction); - else - dma_unmap_page(dev->dma_device, addr, size, direction); + dma_unmap_page(&dev->dev, addr, size, direction); } /** @@ -3083,9 +3080,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_sg(dev, sg, nents, direction); - return dma_map_sg(dev->dma_device, sg, nents, direction); + return dma_map_sg(&dev->dev, sg, nents, direction); } /** @@ -3099,10 +3094,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_sg(dev, sg, nents, direction); - else - dma_unmap_sg(dev->dma_device, sg, nents, direction); + dma_unmap_sg(&dev->dev, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3110,12 +3102,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3123,12 +3110,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3170,10 +3152,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); - else - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); } /** @@ -3188,10 +3167,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); - else - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + dma_sync_single_for_device(&dev->dev, addr, size, dir); } /** @@ -3203,19 +3179,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, */ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, + dma_addr_t *dma_handle, gfp_t flag) { - if (dev->dma_ops) - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); - else { - dma_addr_t handle; - void *ret; - - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); - *dma_handle = handle; - return ret; - } + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); } /** @@ -3227,12 +3194,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, */ static inline void 
ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, - u64 dma_handle) + dma_addr_t dma_handle) { - if (dev->dma_ops) - dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); - else - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); } /** diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 861e23eaebda..8fc1ca7b6f23 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -164,7 +164,7 @@ struct rvt_driver_params { /* Protection domain */ struct rvt_pd { struct ib_pd ibpd; - int user; /* non-zero if created from user space */ + bool user; }; /* Address handle */ @@ -335,6 +335,8 @@ struct rvt_driver_provided { /* Notify driver a mad agent has been removed */ void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx); + /* Notify driver to restart rc */ + void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait); }; struct rvt_dev_info { @@ -483,6 +485,23 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi, return qp; } +/** + * rvt_mod_retry_timer - mod a retry timer + * @qp - the QP + * Modify a potentially already running retry timer + */ +static inline void rvt_mod_retry_timer(struct rvt_qp *qp) +{ + struct ib_qp *ibqp = &qp->ibqp; + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); + + lockdep_assert_held(&qp->s_lock); + qp->s_flags |= RVT_S_TIMER; + /* 4.096 usec. * (1 << qp->timeout) */ + mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies + + rdi->busy_jiffies); +} + struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); void rvt_dealloc_device(struct rvt_dev_info *rdi); int rvt_register_device(struct rvt_dev_info *rvd); diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index de59de28b6a2..f418bd5571a5 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -52,6 +52,7 @@ * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once * drivers no longer need access to the MR directly. */ +#include <linux/percpu-refcount.h> /* * A segment is a linear region of low physical memory. 
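With the dma_ops indirection gone, the ib_dma_* helpers above route straight to the generic DMA API on &dev->dev; consumers keep the exact same calling pattern (sketch):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static u64 example_map_buffer(struct ib_device *dev, void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* the error check goes through the same wrapper as before */
	if (ib_dma_mapping_error(dev, addr))
		return 0;	/* this sketch treats 0 as "not mapped" */
	return addr;
}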
@@ -79,11 +80,11 @@ struct rvt_mregion { int access_flags; u32 max_segs; /* number of rvt_segs in all the arrays */ u32 mapsz; /* size of the map array */ + atomic_t lkey_invalid; /* true if current lkey is invalid */ u8 page_shift; /* 0 - non unform/non powerof2 sizes */ u8 lkey_published; /* in global table */ - atomic_t lkey_invalid; /* true if current lkey is invalid */ + struct percpu_ref refcount; struct completion comp; /* complete when refcount goes to zero */ - atomic_t refcount; struct rvt_segarray *map[0]; /* the segments */ }; @@ -123,13 +124,12 @@ struct rvt_sge_state { static inline void rvt_put_mr(struct rvt_mregion *mr) { - if (unlikely(atomic_dec_and_test(&mr->refcount))) - complete(&mr->comp); + percpu_ref_put(&mr->refcount); } static inline void rvt_get_mr(struct rvt_mregion *mr) { - atomic_inc(&mr->refcount); + percpu_ref_get(&mr->refcount); } static inline void rvt_put_ss(struct rvt_sge_state *ss) @@ -141,4 +141,54 @@ static inline void rvt_put_ss(struct rvt_sge_state *ss) } } +static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length) +{ + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + + return len; +} + +static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length, + bool release) +{ + struct rvt_sge *sge = &ss->sge; + + sge->vaddr += length; + sge->length -= length; + sge->sge_length -= length; + if (sge->sge_length == 0) { + if (release) + rvt_put_mr(sge->mr); + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= RVT_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + return; + sge->n = 0; + } + sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = sge->mr->map[sge->m]->segs[sge->n].length; + } +} + +static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length, + bool release) +{ + struct rvt_sge *sge = &ss->sge; + + while (length) { + u32 len = rvt_get_sge_length(sge, length); + + WARN_ON_ONCE(len == 0); + rvt_update_sge(ss, len, release); + length -= len; + } +} + #endif /* DEF_RDMAVT_INCMRH */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index f3dbd157ae5c..f3816396c76a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -144,6 +144,8 @@ #define RVT_FLUSH_RECV 0x40 #define RVT_PROCESS_OR_FLUSH_SEND \ (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND) +#define RVT_SEND_OR_FLUSH_OR_RECV_OK \ + (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK) /* * Internal send flags @@ -370,6 +372,7 @@ struct rvt_qp { struct rvt_sge_state s_ack_rdma_sge; struct timer_list s_timer; + struct hrtimer s_rnr_timer; atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */ @@ -467,6 +470,15 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) } /** + * rvt_is_user_qp - return if this is user mode QP + * @qp - the target QP + */ +static inline bool rvt_is_user_qp(struct rvt_qp *qp) +{ + return !!qp->pid; +} + +/** * rvt_get_qp - get a QP reference * @qp - the QP to hold */ @@ -582,6 +594,32 @@ static inline void rvt_qp_swqe_complete( } } +/* + * Compare the lower 24 bits of the msn values. + * Returns an integer <, ==, or > than zero. + */ +static inline int rvt_cmp_msn(u32 a, u32 b) +{ + return (((int)a) - ((int)b)) << 8; +} + +/** + * rvt_compute_aeth - compute the AETH (syndrome + MSN) + * @qp: the queue pair to compute the AETH for + * + * Returns the AETH. 
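rvt_get_sge_length() clamps a request against both the current SGE and the remaining sge_length, and rvt_update_sge() then advances the chain; rvt_skip_sge() above is exactly that loop with the data motion left out. A consuming loop built from the same two helpers (a sketch; release=false keeps the MR references held):

#include <linux/string.h>
#include <rdma/rdmavt_mr.h>

static void example_copy_out(u8 *dst, struct rvt_sge_state *ss, u32 length)
{
	while (length) {
		u32 len = rvt_get_sge_length(&ss->sge, length);

		memcpy(dst, ss->sge.vaddr, len);	/* one contiguous chunk */
		rvt_update_sge(ss, len, false);		/* step along the chain */
		dst += len;
		length -= len;
	}
}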
+ */ +__be32 rvt_compute_aeth(struct rvt_qp *qp); + +/** + * rvt_get_credit - flush the send work queue of a QP + * @qp: the qp whose send work queue to flush + * @aeth: the Acknowledge Extended Transport Header + * + * The QP s_lock should be held. + */ +void rvt_get_credit(struct rvt_qp *qp, u32 aeth); + /** * @qp - the qp pair * @len - the length @@ -607,6 +645,14 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len) extern const int ib_rvt_state_ops[]; struct rvt_dev_info; +void rvt_comm_est(struct rvt_qp *qp); int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); +void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err); +unsigned long rvt_rnr_tbl_to_usec(u32 index); +enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t); +void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth); +void rvt_del_timers_sync(struct rvt_qp *qp); +void rvt_stop_rc_timers(struct rvt_qp *qp); +void rvt_add_retry_timer(struct rvt_qp *qp); #endif /* DEF_RDMAVT_INCQP_H */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 8990e580b278..6f22b39f1b0c 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); +extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int scsi_device_get(struct scsi_device *); extern void scsi_device_put(struct scsi_device *); extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, @@ -409,19 +410,16 @@ extern int scsi_is_target_device(const struct device *); extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, - u64 flags, int *resid); -extern int scsi_execute_req_flags(struct scsi_device *sdev, - const unsigned char *cmd, int data_direction, void *buffer, - unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, - int retries, int *resid, u64 flags, req_flags_t rq_flags); + unsigned char *sense, struct scsi_sense_hdr *sshdr, + int timeout, int retries, u64 flags, + req_flags_t rq_flags, int *resid); static inline int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, int *resid) { - return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, - bufflen, sshdr, timeout, retries, resid, 0, 0); + return scsi_execute(sdev, cmd, data_direction, buffer, + bufflen, NULL, sshdr, timeout, retries, 0, 0, resid); } extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); diff --git a/include/sound/control.h b/include/sound/control.h index 21d047f229a1..bd7246de58e7 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -22,6 +22,7 @@ * */ +#include <linux/wait.h> #include <sound/asound.h> #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 1c8f9e1ef2a5..67be2445941a 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -71,6 +71,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream) * @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when * requesting the DMA channel. + * @chan_name: Custom channel name to use when requesting DMA channel. * @fifo_size: FIFO size of the DAI controller in bytes * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now */ @@ -80,6 +81,7 @@ struct snd_dmaengine_dai_dma_data { u32 maxburst; unsigned int slave_id; void *filter_data; + const char *chan_name; unsigned int fifo_size; unsigned int flags; }; @@ -105,6 +107,10 @@ void snd_dmaengine_pcm_set_config_from_dai_data( * playback. */ #define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3) +/* + * The PCM streams have custom channel names specified. + */ +#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4) /** * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM diff --git a/include/sound/pcm.h b/include/sound/pcm.h index af1fb37c6b26..361749e60799 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -570,6 +570,15 @@ int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend(struct snd_pcm_substream *substream); int snd_pcm_suspend_all(struct snd_pcm *pcm); +#else +static inline int snd_pcm_suspend(struct snd_pcm_substream *substream) +{ + return 0; +} +static inline int snd_pcm_suspend_all(struct snd_pcm *pcm) +{ + return 0; +} #endif int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file, diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index f730b91e472f..492a3ca7f17b 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -103,7 +103,7 @@ struct snd_rawmidi_substream { struct snd_rawmidi_runtime *runtime; struct pid *pid; /* hardware layer */ - struct snd_rawmidi_ops *ops; + const struct snd_rawmidi_ops *ops; }; struct snd_rawmidi_file { @@ -155,7 +155,7 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device, int output_count, int input_count, struct snd_rawmidi **rmidi); void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream, - struct snd_rawmidi_ops *ops); + const struct snd_rawmidi_ops *ops); /* callbacks */ diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h index 963229e71dc7..963229e71dc7 100755..100644 --- a/include/sound/rt5665.h +++ b/include/sound/rt5665.h diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 64e90ca9ad32..af58d2362975 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -34,11 +34,12 @@ int asoc_simple_card_set_dailink_name(struct device *dev, int asoc_simple_card_parse_card_name(struct snd_soc_card *card, char *prefix); -#define asoc_simple_card_parse_clk_cpu(node, dai_link, simple_dai) \ - asoc_simple_card_parse_clk(node, dai_link->cpu_of_node, simple_dai) -#define asoc_simple_card_parse_clk_codec(node, dai_link, simple_dai) \ - asoc_simple_card_parse_clk(node, dai_link->codec_of_node, simple_dai) -int asoc_simple_card_parse_clk(struct device_node *node, +#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \ + asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai) +#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \ + asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai) +int asoc_simple_card_parse_clk(struct device *dev, + struct device_node *node, struct device_node *dai_of_node, struct asoc_simple_dai *simple_dai); diff --git 
a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h index 35e94b3d1ec7..cd0bab1ef6f1 100644 --- a/include/sound/snd_wavefront.h +++ b/include/sound/snd_wavefront.h @@ -37,8 +37,8 @@ struct _snd_wavefront_midi { #define MPU_ACK 0xFE #define UART_MODE_ON 0x3F -extern struct snd_rawmidi_ops snd_wavefront_midi_output; -extern struct snd_rawmidi_ops snd_wavefront_midi_input; +extern const struct snd_rawmidi_ops snd_wavefront_midi_output; +extern const struct snd_rawmidi_ops snd_wavefront_midi_input; extern void snd_wavefront_midi_enable_virtual (snd_wavefront_card_t *); extern void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *); diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 200e1f04c166..58acd00cae19 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -256,6 +256,9 @@ struct snd_soc_dai_driver { int (*resume)(struct snd_soc_dai *dai); /* compress dai */ int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num); + /* Optional callback used at PCM creation */ + int (*pcm_new)(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_dai *dai); /* DAI is also used for the control bus */ bool bus_control; diff --git a/include/sound/soc.h b/include/sound/soc.h index b86168a21d56..cdfb55f7aede 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -497,6 +497,8 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream); int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, unsigned int dai_fmt); +int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour); + /* Utility functions to get clock rates from various things */ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params); @@ -507,9 +509,6 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms); int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw); -int snd_soc_platform_trigger(struct snd_pcm_substream *substream, - int cmd, struct snd_soc_platform *platform); - int soc_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai); @@ -785,6 +784,10 @@ struct snd_soc_component_driver { int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); + /* pcm creation and destruction */ + int (*pcm_new)(struct snd_soc_pcm_runtime *); + void (*pcm_free)(struct snd_pcm *); + /* DT */ int (*of_xlate_dai_name)(struct snd_soc_component *component, struct of_phandle_args *args, @@ -859,6 +862,8 @@ struct snd_soc_component { void (*remove)(struct snd_soc_component *); int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); + int (*pcm_new)(struct snd_soc_pcm_runtime *); + void (*pcm_free)(struct snd_pcm *); /* machine specific init */ int (*init)(struct snd_soc_component *component); @@ -941,20 +946,11 @@ struct snd_soc_platform_driver { int (*pcm_new)(struct snd_soc_pcm_runtime *); void (*pcm_free)(struct snd_pcm *); - /* - * For platform caused delay reporting. - * Optional.
- */ - snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, - struct snd_soc_dai *); - /* platform stream pcm ops */ const struct snd_pcm_ops *ops; /* platform stream compress ops */ const struct snd_compr_ops *compr_ops; - - int (*bespoke_trigger)(struct snd_pcm_substream *, int); }; struct snd_soc_dai_link_component { @@ -1099,6 +1095,8 @@ struct snd_soc_card { const char *name; const char *long_name; const char *driver_name; + char dmi_longname[80]; + struct device *dev; struct snd_card *snd_card; struct module *owner; @@ -1647,37 +1645,21 @@ static inline struct snd_soc_platform *snd_soc_kcontrol_platform( int snd_soc_util_init(void); void snd_soc_util_exit(void); -#define snd_soc_of_parse_card_name(card, propname) \ - snd_soc_of_parse_card_name_from_node(card, NULL, propname) -int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); -#define snd_soc_of_parse_audio_simple_widgets(card, propname)\ - snd_soc_of_parse_audio_simple_widgets_from_node(card, NULL, propname) -int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); - +int snd_soc_of_parse_card_name(struct snd_soc_card *card, + const char *propname); +int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, + const char *propname); int snd_soc_of_parse_tdm_slot(struct device_node *np, unsigned int *tx_mask, unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width); -#define snd_soc_of_parse_audio_prefix(card, codec_conf, of_node, propname) \ - snd_soc_of_parse_audio_prefix_from_node(card, NULL, codec_conf, \ - of_node, propname) -void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card, - struct device_node *np, +void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, struct snd_soc_codec_conf *codec_conf, struct device_node *of_node, const char *propname); - -#define snd_soc_of_parse_audio_routing(card, propname) \ - snd_soc_of_parse_audio_routing_from_node(card, NULL, propname) -int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card, - struct device_node *np, - const char *propname); - +int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, + const char *propname); unsigned int snd_soc_of_parse_daifmt(struct device_node *np, const char *prefix, struct device_node **bitclkmaster, diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 1277e9ba0318..ff1a4f4cd66d 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -55,8 +55,12 @@ extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *); extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *, struct iscsi_scsi_req *); -extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *, - struct iscsi_cmd **); +extern int +__iscsit_check_dataout_hdr(struct iscsi_conn *, void *, + struct iscsi_cmd *, u32, bool *); +extern int +iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf, + struct iscsi_cmd **out_cmd); extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *, bool); extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *, @@ -125,6 +129,9 @@ extern void iscsit_release_cmd(struct iscsi_cmd *); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, 
u8); +extern struct iscsi_cmd * +iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn, + itt_t init_task_tag, u32 length); /* * From iscsi_target_nego.c diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index da854fb4530f..37c274e61acc 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -4,7 +4,9 @@ #include <linux/configfs.h> /* struct config_group */ #include <linux/dma-direction.h> /* enum dma_data_direction */ #include <linux/percpu_ida.h> /* struct percpu_ida */ +#include <linux/percpu-refcount.h> #include <linux/semaphore.h> /* struct semaphore */ +#include <linux/completion.h> #define TARGET_CORE_VERSION "v5.0" @@ -197,6 +199,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_UNKNOWN = 0xff, }; /* fabric independent task management response values */ @@ -397,7 +400,6 @@ struct se_tmr_req { void *fabric_tmr_ptr; struct se_cmd *task_cmd; struct se_device *tmr_dev; - struct se_lun *tmr_lun; struct list_head tmr_list; }; @@ -488,8 +490,6 @@ struct se_cmd { #define CMD_T_COMPLETE (1 << 2) #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) -#define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_BUSY (1 << 9) #define CMD_T_TAS (1 << 10) #define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; @@ -732,6 +732,7 @@ struct se_lun { struct config_group lun_group; struct se_port_stat_grps port_stat_grps; struct completion lun_ref_comp; + struct completion lun_shutdown_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; struct hlist_node link; @@ -767,6 +768,8 @@ struct se_device { u32 dev_index; u64 creation_time; atomic_long_t num_resets; + atomic_long_t aborts_complete; + atomic_long_t aborts_no_task; atomic_long_t num_cmds; atomic_long_t read_bytes; atomic_long_t write_bytes; @@ -914,6 +917,7 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item) struct se_wwn { struct target_fabric_configfs *wwn_tf; + void *priv; struct config_group wwn_group; struct config_group fabric_stat_group; }; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 358041bad1da..d7dd1427fe0d 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -47,7 +47,7 @@ struct target_core_fabric_ops { u32 (*tpg_get_inst_index)(struct se_portal_group *); /* * Optional to release struct se_cmd and fabric dependent allocated - * I/O descriptor in transport_cmd_check_stop(). + * I/O descriptor after command execution has finished. * * Returning 1 will signal a descriptor has been released. * Returning 0 will signal a descriptor has not been released. 
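The check_stop_free contract restated above (return 1 when the fabric released its descriptor, 0 when it did not) is typically met by dropping a reference on the command rather than freeing it directly. A minimal sketch of that pattern, assuming the standard target-core helper target_put_sess_cmd(), which returns nonzero once the last reference is put; the example_ name is hypothetical:

/* Illustrative fabric callback, not part of this patch. */
static int example_check_stop_free(struct se_cmd *se_cmd)
{
	/* Nonzero here means the descriptor really was released. */
	return target_put_sess_cmd(se_cmd);
}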
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 88d18a8ceb59..a3c3cab643a9 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -184,7 +184,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, TRACE_EVENT_CONDITION(btrfs_get_extent, - TP_PROTO(struct btrfs_root *root, struct inode *inode, + TP_PROTO(struct btrfs_root *root, struct btrfs_inode *inode, struct extent_map *map), TP_ARGS(root, inode, map), diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index cbdb90b6b308..0a18ab6483ff 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -9,62 +9,6 @@ #include <linux/tracepoint.h> #include <trace/events/mmflags.h> -#define COMPACTION_STATUS \ - EM( COMPACT_SKIPPED, "skipped") \ - EM( COMPACT_DEFERRED, "deferred") \ - EM( COMPACT_CONTINUE, "continue") \ - EM( COMPACT_SUCCESS, "success") \ - EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ - EM( COMPACT_COMPLETE, "complete") \ - EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ - EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ - EMe(COMPACT_CONTENDED, "contended") - -#ifdef CONFIG_ZONE_DMA -#define IFDEF_ZONE_DMA(X) X -#else -#define IFDEF_ZONE_DMA(X) -#endif - -#ifdef CONFIG_ZONE_DMA32 -#define IFDEF_ZONE_DMA32(X) X -#else -#define IFDEF_ZONE_DMA32(X) -#endif - -#ifdef CONFIG_HIGHMEM -#define IFDEF_ZONE_HIGHMEM(X) X -#else -#define IFDEF_ZONE_HIGHMEM(X) -#endif - -#define ZONE_TYPE \ - IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ - IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ - EM (ZONE_NORMAL, "Normal") \ - IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ - EMe(ZONE_MOVABLE,"Movable") - -/* - * First define the enums in the above macros to be exported to userspace - * via TRACE_DEFINE_ENUM(). - */ -#undef EM -#undef EMe -#define EM(a, b) TRACE_DEFINE_ENUM(a); -#define EMe(a, b) TRACE_DEFINE_ENUM(a); - -COMPACTION_STATUS -ZONE_TYPE - -/* - * Now redefine the EM() and EMe() macros to map the enums to the strings - * that will be printed in the output. - */ -#undef EM -#undef EMe -#define EM(a, b) {a, b}, -#define EMe(a, b) {a, b} DECLARE_EVENT_CLASS(mm_compaction_isolate_template, @@ -187,6 +131,7 @@ TRACE_EVENT(mm_compaction_begin, __entry->sync ? "sync" : "async") ); +#ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_end, TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, unsigned long free_pfn, unsigned long zone_end, bool sync, @@ -220,6 +165,7 @@ TRACE_EVENT(mm_compaction_end, __entry->sync ? 
"sync" : "async", __print_symbolic(__entry->status, COMPACTION_STATUS)) ); +#endif TRACE_EVENT(mm_compaction_try_to_compact_pages, @@ -248,6 +194,7 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages, __entry->prio) ); +#ifdef CONFIG_COMPACTION DECLARE_EVENT_CLASS(mm_compaction_suitable_template, TP_PROTO(struct zone *zone, @@ -295,7 +242,6 @@ DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable, TP_ARGS(zone, order, ret) ); -#ifdef CONFIG_COMPACTION DECLARE_EVENT_CLASS(mm_compaction_defer_template, TP_PROTO(struct zone *zone, int order), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 01b3c9869a0d..c80fcad0a6c9 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -6,8 +6,8 @@ #include <linux/tracepoint.h> -#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev) -#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino TRACE_DEFINE_ENUM(NODE); TRACE_DEFINE_ENUM(DATA); @@ -55,25 +55,35 @@ TRACE_DEFINE_ENUM(CP_DISCARD); { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) -#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) -#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) - -#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ - show_bio_extra(op_flags) +#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\ + REQ_PRIO) +#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS) + +#define show_bio_type(op,op_flags) show_bio_op(op), \ + show_bio_op_flags(op_flags) + +#define show_bio_op(op) \ + __print_symbolic(op, \ + { REQ_OP_READ, "READ" }, \ + { REQ_OP_WRITE, "WRITE" }, \ + { REQ_OP_FLUSH, "FLUSH" }, \ + { REQ_OP_DISCARD, "DISCARD" }, \ + { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \ + { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ + { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ + { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ + { REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" }) #define show_bio_op_flags(flags) \ __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ - { 0, "WRITE" }, \ - { REQ_RAHEAD, "READAHEAD" }, \ - { REQ_SYNC, "REQ_SYNC" }, \ - { REQ_PREFLUSH, "REQ_PREFLUSH" }, \ - { REQ_FUA, "REQ_FUA" }) - -#define show_bio_extra(type) \ - __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \ + { REQ_RAHEAD, "(RA)" }, \ + { REQ_SYNC, "(S)" }, \ + { REQ_SYNC | REQ_PRIO, "(SP)" }, \ { REQ_META, "(M)" }, \ - { REQ_PRIO, "(P)" }, \ { REQ_META | REQ_PRIO, "(MP)" }, \ + { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \ + { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \ + { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \ { 0, " \b" }) #define show_data_type(type) \ @@ -235,7 +245,7 @@ TRACE_EVENT(f2fs_sync_fs, ), TP_printk("dev = (%d,%d), superblock is %s, wait = %d", - show_dev(__entry), + show_dev(__entry->dev), __entry->dirty ? 
"dirty" : "not dirty", __entry->wait) ); @@ -305,6 +315,13 @@ DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, TP_ARGS(inode, ret) ); +DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode, + + TP_PROTO(struct inode *inode, int ret), + + TP_ARGS(inode, ret) +); + DEFINE_EVENT(f2fs__inode, f2fs_truncate, TP_PROTO(struct inode *inode), @@ -534,7 +551,7 @@ TRACE_EVENT(f2fs_background_gc, ), TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->wait_ms, __entry->prefree, __entry->free) @@ -555,6 +572,7 @@ TRACE_EVENT(f2fs_get_victim, __field(int, alloc_mode) __field(int, gc_mode) __field(unsigned int, victim) + __field(unsigned int, cost) __field(unsigned int, ofs_unit) __field(unsigned int, pre_victim) __field(unsigned int, prefree) @@ -568,20 +586,23 @@ TRACE_EVENT(f2fs_get_victim, __entry->alloc_mode = p->alloc_mode; __entry->gc_mode = p->gc_mode; __entry->victim = p->min_segno; + __entry->cost = p->min_cost; __entry->ofs_unit = p->ofs_unit; __entry->pre_victim = pre_victim; __entry->prefree = prefree; __entry->free = free; ), - TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u " - "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u", - show_dev(__entry), + TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), " + "victim = %u, cost = %u, ofs_unit = %u, " + "pre_victim_secno = %d, prefree = %u, free = %u", + show_dev(__entry->dev), show_data_type(__entry->type), show_gc_type(__entry->gc_type), show_alloc_mode(__entry->alloc_mode), show_victim_policy(__entry->gc_mode), __entry->victim, + __entry->cost, __entry->ofs_unit, (int)__entry->pre_victim, __entry->prefree, @@ -713,7 +734,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks, ), TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu", - show_dev(__entry), + show_dev(__entry->dev), (unsigned int)__entry->nid, __entry->ofs_in_node, (unsigned long long)__entry->count) @@ -753,7 +774,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, (unsigned long)__entry->index, (unsigned long long)__entry->old_blkaddr, (unsigned long long)__entry->new_blkaddr, - show_bio_type(__entry->op_flags), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type)) ); @@ -775,15 +796,15 @@ DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, TP_CONDITION(page->mapping) ); -DECLARE_EVENT_CLASS(f2fs__submit_bio, +DECLARE_EVENT_CLASS(f2fs__bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_STRUCT__entry( __field(dev_t, dev) + __field(dev_t, target) __field(int, op) __field(int, op_flags) __field(int, type) @@ -793,37 +814,55 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_fast_assign( __entry->dev = sb->s_dev; - __entry->op = fio->op; - __entry->op_flags = fio->op_flags; - __entry->type = fio->type; + __entry->target = bio->bi_bdev->bd_dev; + __entry->op = bio_op(bio); + __entry->op_flags = bio->bi_opf; + __entry->type = type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), - TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u", - show_dev(__entry), - show_bio_type(__entry->op_flags), + TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u", + show_dev(__entry->target), + show_dev(__entry->dev), + show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type), (unsigned long long)__entry->sector, 
__entry->size) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); -DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio, - TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, - struct bio *bio), + TP_PROTO(struct super_block *sb, int type, struct bio *bio), - TP_ARGS(sb, fio, bio), + TP_ARGS(sb, type, bio), + + TP_CONDITION(bio) +); + +DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio, + + TP_PROTO(struct super_block *sb, int type, struct bio *bio), + + TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); @@ -1082,16 +1121,16 @@ TRACE_EVENT(f2fs_write_checkpoint, ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", - show_dev(__entry), + show_dev(__entry->dev), show_cpreason(__entry->reason), __entry->msg) ); TRACE_EVENT(f2fs_issue_discard, - TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen), + TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), - TP_ARGS(sb, blkstart, blklen), + TP_ARGS(dev, blkstart, blklen), TP_STRUCT__entry( __field(dev_t, dev) @@ -1100,22 +1139,22 @@ TRACE_EVENT(f2fs_issue_discard, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; __entry->blklen = blklen; ), TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart, (unsigned long long)__entry->blklen) ); TRACE_EVENT(f2fs_issue_reset_zone, - TP_PROTO(struct super_block *sb, block_t blkstart), + TP_PROTO(struct block_device *dev, block_t blkstart), - TP_ARGS(sb, blkstart), + TP_ARGS(dev, blkstart), TP_STRUCT__entry( __field(dev_t, dev) @@ -1123,21 +1162,21 @@ TRACE_EVENT(f2fs_issue_reset_zone, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; ), TP_printk("dev = (%d,%d), reset zone at block = 0x%llx", - show_dev(__entry), + show_dev(__entry->dev), (unsigned long long)__entry->blkstart) ); TRACE_EVENT(f2fs_issue_flush, - TP_PROTO(struct super_block *sb, unsigned int nobarrier, + TP_PROTO(struct block_device *dev, unsigned int nobarrier, unsigned int flush_merge), - TP_ARGS(sb, nobarrier, flush_merge), + TP_ARGS(dev, nobarrier, flush_merge), TP_STRUCT__entry( __field(dev_t, dev) @@ -1146,13 +1185,13 @@ TRACE_EVENT(f2fs_issue_flush, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = dev->bd_dev; __entry->nobarrier = nobarrier; __entry->flush_merge = flush_merge; ), TP_printk("dev = (%d,%d), %s %s", - show_dev(__entry), + show_dev(__entry->dev), __entry->nobarrier ? "skip (nobarrier)" : "issue", __entry->flush_merge ? 
" with flush_merge" : "") ); @@ -1267,7 +1306,7 @@ TRACE_EVENT(f2fs_shrink_extent_tree, ), TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", - show_dev(__entry), + show_dev(__entry->dev), __entry->node_cnt, __entry->tree_cnt) ); @@ -1314,7 +1353,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, ), TP_printk("dev = (%d,%d), %s, dirty count = %lld", - show_dev(__entry), + show_dev(__entry->dev), show_file_type(__entry->type), __entry->count) ); diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h new file mode 100644 index 000000000000..c566ddc87f73 --- /dev/null +++ b/include/trace/events/fs_dax.h @@ -0,0 +1,156 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fs_dax + +#if !defined(_TRACE_FS_DAX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FS_DAX_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(dax_pmd_fault_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + pgoff_t max_pgoff, int result), + TP_ARGS(inode, vmf, max_pgoff, result), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(pgoff_t, pgoff) + __field(pgoff_t, max_pgoff) + __field(dev_t, dev) + __field(unsigned int, flags) + __field(int, result) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_start = vmf->vma->vm_start; + __entry->vm_end = vmf->vma->vm_end; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->flags = vmf->flags; + __entry->pgoff = vmf->pgoff; + __entry->max_pgoff = max_pgoff; + __entry->result = result; + ), + TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start " + "%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? "shared" : "private", + __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE), + __entry->address, + __entry->vm_start, + __entry->vm_end, + __entry->pgoff, + __entry->max_pgoff, + __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE) + ) +) + +#define DEFINE_PMD_FAULT_EVENT(name) \ +DEFINE_EVENT(dax_pmd_fault_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + pgoff_t max_pgoff, int result), \ + TP_ARGS(inode, vmf, max_pgoff, result)) + +DEFINE_PMD_FAULT_EVENT(dax_pmd_fault); +DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done); + +DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + struct page *zero_page, + void *radix_entry), + TP_ARGS(inode, vmf, zero_page, radix_entry), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(struct page *, zero_page) + __field(void *, radix_entry) + __field(dev_t, dev) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->zero_page = zero_page; + __entry->radix_entry = radix_entry; + ), + TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p " + "radix_entry %#lx", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? 
"shared" : "private", + __entry->address, + __entry->zero_page, + (unsigned long)__entry->radix_entry + ) +) + +#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \ +DEFINE_EVENT(dax_pmd_load_hole_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + struct page *zero_page, void *radix_entry), \ + TP_ARGS(inode, vmf, zero_page, radix_entry)) + +DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole); +DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback); + +DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class, + TP_PROTO(struct inode *inode, struct vm_fault *vmf, + long length, pfn_t pfn, void *radix_entry), + TP_ARGS(inode, vmf, length, pfn, radix_entry), + TP_STRUCT__entry( + __field(unsigned long, ino) + __field(unsigned long, vm_flags) + __field(unsigned long, address) + __field(long, length) + __field(u64, pfn_val) + __field(void *, radix_entry) + __field(dev_t, dev) + __field(int, write) + ), + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->vm_flags = vmf->vma->vm_flags; + __entry->address = vmf->address; + __entry->write = vmf->flags & FAULT_FLAG_WRITE; + __entry->length = length; + __entry->pfn_val = pfn.val; + __entry->radix_entry = radix_entry; + ), + TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx " + "pfn %#llx %s radix_entry %#lx", + MAJOR(__entry->dev), + MINOR(__entry->dev), + __entry->ino, + __entry->vm_flags & VM_SHARED ? "shared" : "private", + __entry->write ? "write" : "read", + __entry->address, + __entry->length, + __entry->pfn_val & ~PFN_FLAGS_MASK, + __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|", + PFN_FLAGS_TRACE), + (unsigned long)__entry->radix_entry + ) +) + +#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \ +DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ + long length, pfn_t pfn, void *radix_entry), \ + TP_ARGS(inode, vmf, length, pfn, radix_entry)) + +DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping); +DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback); + +#endif /* _TRACE_FS_DAX_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 15bf875d0e4a..304ff94363b2 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -1,3 +1,6 @@ +#include <linux/node.h> +#include <linux/mmzone.h> +#include <linux/compaction.h> /* * The order of these masks is important. Matching masks will be seen * first and the left over flags will end up showing by themselves. @@ -171,3 +174,98 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ (flags) ? __print_flags(flags, "|", \ __def_vmaflag_names \ ) : "none" + +#ifdef CONFIG_COMPACTION +#define COMPACTION_STATUS \ + EM( COMPACT_SKIPPED, "skipped") \ + EM( COMPACT_DEFERRED, "deferred") \ + EM( COMPACT_CONTINUE, "continue") \ + EM( COMPACT_SUCCESS, "success") \ + EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \ + EM( COMPACT_COMPLETE, "complete") \ + EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ + EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ + EMe(COMPACT_CONTENDED, "contended") + +/* High-level compaction status feedback */ +#define COMPACTION_FAILED 1 +#define COMPACTION_WITHDRAWN 2 +#define COMPACTION_PROGRESS 3 + +#define compact_result_to_feedback(result) \ +({ \ + enum compact_result __result = result; \ + (compaction_failed(__result)) ? COMPACTION_FAILED : \ + (compaction_withdrawn(__result)) ? 
COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \ +}) + +#define COMPACTION_FEEDBACK \ + EM(COMPACTION_FAILED, "failed") \ + EM(COMPACTION_WITHDRAWN, "withdrawn") \ + EMe(COMPACTION_PROGRESS, "progress") + +#define COMPACTION_PRIORITY \ + EM(COMPACT_PRIO_SYNC_FULL, "COMPACT_PRIO_SYNC_FULL") \ + EM(COMPACT_PRIO_SYNC_LIGHT, "COMPACT_PRIO_SYNC_LIGHT") \ + EMe(COMPACT_PRIO_ASYNC, "COMPACT_PRIO_ASYNC") +#else +#define COMPACTION_STATUS +#define COMPACTION_PRIORITY +#define COMPACTION_FEEDBACK +#endif + +#ifdef CONFIG_ZONE_DMA +#define IFDEF_ZONE_DMA(X) X +#else +#define IFDEF_ZONE_DMA(X) +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define IFDEF_ZONE_DMA32(X) X +#else +#define IFDEF_ZONE_DMA32(X) +#endif + +#ifdef CONFIG_HIGHMEM +#define IFDEF_ZONE_HIGHMEM(X) X +#else +#define IFDEF_ZONE_HIGHMEM(X) +#endif + +#define ZONE_TYPE \ + IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ + IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ + EM (ZONE_NORMAL, "Normal") \ + IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ + EMe(ZONE_MOVABLE,"Movable") + +#define LRU_NAMES \ + EM (LRU_INACTIVE_ANON, "inactive_anon") \ + EM (LRU_ACTIVE_ANON, "active_anon") \ + EM (LRU_INACTIVE_FILE, "inactive_file") \ + EM (LRU_ACTIVE_FILE, "active_file") \ + EMe(LRU_UNEVICTABLE, "unevictable") + +/* + * First define the enums in the above macros to be exported to userspace + * via TRACE_DEFINE_ENUM(). + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +COMPACTION_STATUS +COMPACTION_PRIORITY +COMPACTION_FEEDBACK +ZONE_TYPE +LRU_NAMES + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h index 1e974983757e..38baeb27221a 100644 --- a/include/trace/events/oom.h +++ b/include/trace/events/oom.h @@ -4,6 +4,7 @@ #if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_OOM_H #include <linux/tracepoint.h> +#include <trace/events/mmflags.h> TRACE_EVENT(oom_score_adj_update, @@ -27,6 +28,86 @@ TRACE_EVENT(oom_score_adj_update, __entry->pid, __entry->comm, __entry->oom_score_adj) ); +TRACE_EVENT(reclaim_retry_zone, + + TP_PROTO(struct zoneref *zoneref, + int order, + unsigned long reclaimable, + unsigned long available, + unsigned long min_wmark, + int no_progress_loops, + bool wmark_check), + + TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check), + + TP_STRUCT__entry( + __field( int, node) + __field( int, zone_idx) + __field( int, order) + __field( unsigned long, reclaimable) + __field( unsigned long, available) + __field( unsigned long, min_wmark) + __field( int, no_progress_loops) + __field( bool, wmark_check) + ), + + TP_fast_assign( + __entry->node = zone_to_nid(zoneref->zone); + __entry->zone_idx = zoneref->zone_idx; + __entry->order = order; + __entry->reclaimable = reclaimable; + __entry->available = available; + __entry->min_wmark = min_wmark; + __entry->no_progress_loops = no_progress_loops; + __entry->wmark_check = wmark_check; + ), + + TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loops=%d wmark_check=%d", + __entry->node, __print_symbolic(__entry->zone_idx, ZONE_TYPE), + __entry->order, + __entry->reclaimable, __entry->available, __entry->min_wmark, + __entry->no_progress_loops, + __entry->wmark_check) +); + +#ifdef CONFIG_COMPACTION +TRACE_EVENT(compact_retry, + + 
TP_PROTO(int order, + enum compact_priority priority, + enum compact_result result, + int retries, + int max_retries, + bool ret), + + TP_ARGS(order, priority, result, retries, max_retries, ret), + + TP_STRUCT__entry( + __field( int, order) + __field( int, priority) + __field( int, result) + __field( int, retries) + __field( int, max_retries) + __field( bool, ret) + ), + + TP_fast_assign( + __entry->order = order; + __entry->priority = priority; + __entry->result = compact_result_to_feedback(result); + __entry->retries = retries; + __entry->max_retries = max_retries; + __entry->ret = ret; + ), + + TP_printk("order=%d priority=%s compaction_result=%s retries=%d max_retries=%d should_retry=%d", + __entry->order, + __print_symbolic(__entry->priority, COMPACTION_PRIORITY), + __print_symbolic(__entry->result, COMPACTION_FEEDBACK), + __entry->retries, __entry->max_retries, + __entry->ret) +); +#endif /* CONFIG_COMPACTION */ #endif /* This part must be outside protection */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9b90c57517a9..9e3ef6c99e4b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -4,7 +4,7 @@ #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H -#include <linux/sched.h> +#include <linux/sched/numa_balancing.h> #include <linux/tracepoint.h> #include <linux/binfmts.h> diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1bca99dbb98f..80787eafba99 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init, TP_ARGS(timer) ); +#define decode_timer_flags(flags) \ + __print_flags(flags, "|", \ + { TIMER_MIGRATING, "M" }, \ + { TIMER_DEFERRABLE, "D" }, \ + { TIMER_PINNED, "P" }, \ + { TIMER_IRQSAFE, "I" }) + /** * timer_start - called when the timer is started * @timer: pointer to struct timer_list @@ -65,9 +72,12 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, - (long)__entry->expires - __entry->now, __entry->flags) + (long)__entry->expires - __entry->now, + __entry->flags & TIMER_CPUMASK, + __entry->flags >> TIMER_ARRAYSHIFT, + decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK)) ); /** diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index c88fd0934e7e..27e8a5c77579 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -15,6 +15,7 @@ #define RECLAIM_WB_MIXED 0x0010u #define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */ #define RECLAIM_WB_ASYNC 0x0008u +#define RECLAIM_WB_LRU (RECLAIM_WB_ANON|RECLAIM_WB_FILE) #define show_reclaim_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ @@ -269,26 +270,27 @@ TRACE_EVENT(mm_shrink_slab_end, __entry->retval) ); -DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, - +TRACE_EVENT(mm_vmscan_lru_isolate, TP_PROTO(int classzone_idx, int order, unsigned long nr_requested, unsigned long nr_scanned, + unsigned long nr_skipped, unsigned long nr_taken, isolate_mode_t isolate_mode, - int file), + int lru), - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file), + TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), TP_STRUCT__entry( __field(int, classzone_idx) __field(int, order) __field(unsigned long, nr_requested) __field(unsigned long, nr_scanned) + __field(unsigned long, nr_skipped) __field(unsigned long, nr_taken) __field(isolate_mode_t, isolate_mode) - __field(int, file) + __field(int, lru) ), TP_fast_assign( @@ -296,47 +298,21 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, __entry->order = order; __entry->nr_requested = nr_requested; __entry->nr_scanned = nr_scanned; + __entry->nr_skipped = nr_skipped; __entry->nr_taken = nr_taken; __entry->isolate_mode = isolate_mode; - __entry->file = file; + __entry->lru = lru; ), - TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d", + TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", __entry->isolate_mode, __entry->classzone_idx, __entry->order, __entry->nr_requested, __entry->nr_scanned, + __entry->nr_skipped, __entry->nr_taken, - __entry->file) -); - -DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate, - - TP_PROTO(int classzone_idx, - int order, - unsigned long nr_requested, - unsigned long nr_scanned, - unsigned long nr_taken, - isolate_mode_t isolate_mode, - int file), - - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) - -); - -DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate, - - TP_PROTO(int classzone_idx, - int order, - unsigned long nr_requested, - unsigned long nr_scanned, - unsigned long nr_taken, - isolate_mode_t isolate_mode, - int file), - - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_taken, isolate_mode, file) - + __print_symbolic(__entry->lru, LRU_NAMES)) ); TRACE_EVENT(mm_vmscan_writepage, @@ -365,14 +341,27 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, TP_PROTO(int nid, unsigned long nr_scanned, unsigned long nr_reclaimed, + unsigned long nr_dirty, unsigned long nr_writeback, + unsigned long nr_congested, unsigned long nr_immediate, + unsigned long nr_activate, unsigned long nr_ref_keep, + unsigned long nr_unmap_fail, int priority, int file), - TP_ARGS(nid, nr_scanned, nr_reclaimed, priority, file), + TP_ARGS(nid, nr_scanned, nr_reclaimed, nr_dirty, nr_writeback, + nr_congested, nr_immediate, nr_activate, nr_ref_keep, + nr_unmap_fail, priority, file), TP_STRUCT__entry( __field(int, nid) __field(unsigned long, nr_scanned) __field(unsigned long, nr_reclaimed) + __field(unsigned long, nr_dirty) + __field(unsigned long, nr_writeback) + __field(unsigned long, nr_congested) + __field(unsigned long, nr_immediate) + __field(unsigned long, nr_activate) + __field(unsigned long, nr_ref_keep) + __field(unsigned long, nr_unmap_fail) __field(int, priority) __field(int, reclaim_flags) ), @@ -381,17 +370,102 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, __entry->nid = nid; __entry->nr_scanned = nr_scanned; __entry->nr_reclaimed = nr_reclaimed; + __entry->nr_dirty = 
nr_dirty; + __entry->nr_writeback = nr_writeback; + __entry->nr_congested = nr_congested; + __entry->nr_immediate = nr_immediate; + __entry->nr_activate = nr_activate; + __entry->nr_ref_keep = nr_ref_keep; + __entry->nr_unmap_fail = nr_unmap_fail; __entry->priority = priority; __entry->reclaim_flags = trace_shrink_flags(file); ), - TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", + TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate=%ld nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s", __entry->nid, __entry->nr_scanned, __entry->nr_reclaimed, + __entry->nr_dirty, __entry->nr_writeback, + __entry->nr_congested, __entry->nr_immediate, + __entry->nr_activate, __entry->nr_ref_keep, + __entry->nr_unmap_fail, __entry->priority, + show_reclaim_flags(__entry->reclaim_flags)) +); + +TRACE_EVENT(mm_vmscan_lru_shrink_active, + + TP_PROTO(int nid, unsigned long nr_taken, + unsigned long nr_active, unsigned long nr_deactivated, + unsigned long nr_referenced, int priority, int file), + + TP_ARGS(nid, nr_taken, nr_active, nr_deactivated, nr_referenced, priority, file), + + TP_STRUCT__entry( + __field(int, nid) + __field(unsigned long, nr_taken) + __field(unsigned long, nr_active) + __field(unsigned long, nr_deactivated) + __field(unsigned long, nr_referenced) + __field(int, priority) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->nr_taken = nr_taken; + __entry->nr_active = nr_active; + __entry->nr_deactivated = nr_deactivated; + __entry->nr_referenced = nr_referenced; + __entry->priority = priority; + __entry->reclaim_flags = trace_shrink_flags(file); + ), + + TP_printk("nid=%d nr_taken=%ld nr_active=%ld nr_deactivated=%ld nr_referenced=%ld priority=%d flags=%s", + __entry->nid, + __entry->nr_taken, + __entry->nr_active, __entry->nr_deactivated, __entry->nr_referenced, __entry->priority, show_reclaim_flags(__entry->reclaim_flags)) ); +TRACE_EVENT(mm_vmscan_inactive_list_is_low, + + TP_PROTO(int nid, int reclaim_idx, + unsigned long total_inactive, unsigned long inactive, + unsigned long total_active, unsigned long active, + unsigned long ratio, int file), + + TP_ARGS(nid, reclaim_idx, total_inactive, inactive, total_active, active, ratio, file), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, reclaim_idx) + __field(unsigned long, total_inactive) + __field(unsigned long, inactive) + __field(unsigned long, total_active) + __field(unsigned long, active) + __field(unsigned long, ratio) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->reclaim_idx = reclaim_idx; + __entry->total_inactive = total_inactive; + __entry->inactive = inactive; + __entry->total_active = total_active; + __entry->active = active; + __entry->ratio = ratio; + __entry->reclaim_flags = trace_shrink_flags(file) & RECLAIM_WB_LRU; + ), + + TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s", + __entry->nid, + __entry->reclaim_idx, + __entry->total_inactive, __entry->inactive, + __entry->total_active, __entry->active, + __entry->ratio, + show_reclaim_flags(__entry->reclaim_flags)) +); #endif /* _TRACE_VMSCAN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 2ccd9ccbf9ef..7bd8783a590f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -31,7 +31,7 @@ #define WB_WORK_REASON \ 
EM( WB_REASON_BACKGROUND, "background") \ - EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \ + EM( WB_REASON_VMSCAN, "vmscan") \ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 5c06f4af8323..00f643164ca2 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -283,8 +283,16 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_symbols_seq(p, value, symbols); \ }) +#undef __print_flags_u64 #undef __print_symbolic_u64 #if BITS_PER_LONG == 32 +#define __print_flags_u64(flag, delim, flag_array...) \ + ({ \ + static const struct trace_print_flags_u64 __flags[] = \ + { flag_array, { -1, NULL } }; \ + trace_print_flags_seq_u64(p, delim, flag, __flags); \ + }) + #define __print_symbolic_u64(value, symbol_array...) \ ({ \ static const struct trace_print_flags_u64 symbols[] = \ @@ -292,6 +300,9 @@ TRACE_MAKE_SYSTEM_STR(); trace_print_symbols_seq_u64(p, value, symbols); \ }) #else +#define __print_flags_u64(flag, delim, flag_array...) \ + __print_flags(flag, delim, flag_array) + #define __print_symbolic_u64(value, symbol_array...) \ __print_symbolic(value, symbol_array) #endif diff --git a/include/uapi/asm-generic/ioctl.h b/include/uapi/asm-generic/ioctl.h index 7e7c11b52143..749b32fe5623 100644 --- a/include/uapi/asm-generic/ioctl.h +++ b/include/uapi/asm-generic/ioctl.h @@ -48,6 +48,9 @@ /* * Direction bits, which any architecture can choose to override * before including this file. + * + * NOTE: _IOC_WRITE means userland is writing and kernel is + * reading. _IOC_READ means userland is reading and kernel is writing. */ #ifndef _IOC_NONE @@ -72,7 +75,12 @@ #define _IOC_TYPECHECK(t) (sizeof(t)) #endif -/* used to create numbers */ +/* + * Used to create numbers. + * + * NOTE: _IOW means userland is writing and kernel is reading. _IOR + * means userland is reading and kernel is writing. 
+ */ #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 9355dd8eff3b..c97addd08f8c 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -9,6 +9,7 @@ header-y += i810_drm.h header-y += i915_drm.h header-y += mga_drm.h header-y += nouveau_drm.h +header-y += omap_drm.h header-y += qxl_drm.h header-y += r128_drm.h header-y += radeon_drm.h diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 396183628f3c..5797283c2d79 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -528,6 +528,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VBIOS_SIZE 0x1 /* Subquery id: Query vbios image */ #define AMDGPU_INFO_VBIOS_IMAGE 0x2 +/* Query UVD handles */ +#define AMDGPU_INFO_NUM_HANDLES 0x1C #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff @@ -719,6 +721,13 @@ struct drm_amdgpu_info_hw_ip { __u32 _pad; }; +struct drm_amdgpu_info_num_handles { + /** Max handles as supported by firmware for UVD */ + __u32 uvd_max_handles; + /** Handles currently in use for UVD */ + __u32 uvd_used_handles; +}; + #define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6 struct drm_amdgpu_info_vce_clock_table_entry { diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index a5890bf44c0a..ef20abb8119b 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -41,10 +41,17 @@ extern "C" { /* 8 bpp Red */ #define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ +/* 16 bpp Red */ +#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */ + /* 16 bpp RG */ #define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ #define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ +/* 32 bpp RG */ +#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */ +#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */ + /* 8 bpp RGB */ #define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ #define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ @@ -154,11 +161,13 @@ extern "C" { /* Vendor Ids: */ #define DRM_FORMAT_MOD_NONE 0 +#define DRM_FORMAT_MOD_VENDOR_NONE 0 #define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 #define DRM_FORMAT_MOD_VENDOR_AMD 0x02 #define DRM_FORMAT_MOD_VENDOR_NV 0x03 #define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 #define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 +#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 /* add more to the end as needed */ #define fourcc_mod_code(vendor, val) \ @@ -172,6 +181,16 @@ extern "C" { * authoritative source for all of these. */ +/* + * Linear Layout + * + * Just plain linear layout. Note that this is different from not specifying any + * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl), + * which tells the driver to also take driver-internal information into account + * and so might actually result in a tiled framebuffer.
+ */ +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) + /* Intel framebuffer modifiers */ /* @@ -233,6 +252,46 @@ extern "C" { */ #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) +/* Vivante framebuffer modifiers */ + +/* + * Vivante 4x4 tiling layout + * + * This is a simple tiled layout using tiles of 4x4 pixels in a row-major + * layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1) + +/* + * Vivante 64x64 super-tiling layout + * + * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile + * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row- + * major layout. + * + * For more information: see + * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling + */ +#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2) + +/* + * Vivante 4x4 tiling layout for dual-pipe + * + * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a + * different base address. Offsets from the base addresses are therefore halved + * compared to the non-split tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3) + +/* + * Vivante 64x64 super-tiling layout for dual-pipe + * + * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile + * starts at a different base address. Offsets from the base addresses are + * therefore halved compared to the non-split super-tiled layout. + */ +#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4) + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 1c12a350eca3..57093b455db6 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_USERPTR 0x33 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 +#define DRM_I915_PERF_OPEN 0x36 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) +#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -393,6 +395,7 @@ typedef struct drm_i915_irq_wait { * priorities and the driver will attempt to execute batches in priority order. 
*/ #define I915_PARAM_HAS_SCHEDULER 41 +#define I915_PARAM_HUC_STATUS 42 typedef struct drm_i915_getparam { __s32 param; @@ -1224,9 +1227,142 @@ struct drm_i915_gem_context_param { #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 +#define I915_CONTEXT_PARAM_BANNABLE 0x5 __u64 value; }; +enum drm_i915_oa_format { + I915_OA_FORMAT_A13 = 1, + I915_OA_FORMAT_A29, + I915_OA_FORMAT_A13_B8_C8, + I915_OA_FORMAT_B4_C8, + I915_OA_FORMAT_A45_B8_C8, + I915_OA_FORMAT_B4_C8_A16, + I915_OA_FORMAT_C4_B8, + + I915_OA_FORMAT_MAX /* non-ABI */ +}; + +enum drm_i915_perf_property_id { + /** + * Open the stream for a specific context handle (as used with + * execbuffer2). A stream opened for a specific context this way + * won't typically require root privileges. + */ + DRM_I915_PERF_PROP_CTX_HANDLE = 1, + + /** + * A value of 1 requests the inclusion of raw OA unit reports as + * part of stream samples. + */ + DRM_I915_PERF_PROP_SAMPLE_OA, + + /** + * The value specifies which set of OA unit metrics should + * be configured, defining the contents of any OA unit reports. + */ + DRM_I915_PERF_PROP_OA_METRICS_SET, + + /** + * The value specifies the size and layout of OA unit reports. + */ + DRM_I915_PERF_PROP_OA_FORMAT, + + /** + * Specifying this property implicitly requests periodic OA unit + * sampling and (at least on Haswell) the sampling frequency is derived + * from this exponent as follows: + * + * 80ns * 2^(period_exponent + 1) + */ + DRM_I915_PERF_PROP_OA_EXPONENT, + + DRM_I915_PERF_PROP_MAX /* non-ABI */ +}; + +struct drm_i915_perf_open_param { + __u32 flags; +#define I915_PERF_FLAG_FD_CLOEXEC (1<<0) +#define I915_PERF_FLAG_FD_NONBLOCK (1<<1) +#define I915_PERF_FLAG_DISABLED (1<<2) + + /** The number of u64 (id, value) pairs */ + __u32 num_properties; + + /** + * Pointer to array of u64 (id, value) pairs configuring the stream + * to open. + */ + __u64 properties_ptr; +}; + +/** + * Enable data capture for a stream that was either opened in a disabled state + * via I915_PERF_FLAG_DISABLED or was later disabled via + * I915_PERF_IOCTL_DISABLE. + * + * It is intended to be cheaper to disable and enable a stream than it may be + * to close and re-open a stream with the same configuration. + * + * It's undefined whether any pending data for the stream will be lost. + */ +#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) + +/** + * Disable data capture for a stream. + * + * It is an error to try to read a stream that is disabled. + */ +#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) + +/** + * Common to all i915 perf records + */ +struct drm_i915_perf_record_header { + __u32 type; + __u16 pad; + __u16 size; +}; + +enum drm_i915_perf_record_type { + + /** + * Samples are the workhorse record type whose contents are extensible + * and defined when opening an i915 perf stream based on the given + * properties. + * + * Boolean properties following the naming convention + * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in + * every sample. + * + * The order of these sample properties given by userspace has no + * effect on the ordering of data within a sample. The order is + * documented here. + * + * struct { + * struct drm_i915_perf_record_header header; + * + * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA + * }; + */ + DRM_I915_PERF_RECORD_SAMPLE = 1, + + /* + * Indicates that one or more OA reports were not written by the + * hardware.
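Given the open parameters defined above, a hedged usage sketch may help show how the (id, value) property pairs are passed to DRM_IOCTL_I915_PERF_OPEN. The metrics-set ID below is a placeholder rather than a real configuration, error handling is minimal, and the uapi header is assumed to be installed as <drm/i915_drm.h>.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns a new i915 perf stream fd on success, or -1 on error. */
int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder set ID */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* 80ns * 2^17 ~= 10.5ms */
	};
	struct drm_i915_perf_open_param param;

	memset(&param, 0, sizeof(param));
	param.flags = I915_PERF_FLAG_FD_CLOEXEC;
	param.num_properties = sizeof(properties) / (2 * sizeof(uint64_t));
	param.properties_ptr = (uintptr_t)properties;

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}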
This can happen for example if an MI_REPORT_PERF_COUNT + * command collides with periodic sampling - which would be more likely + * at higher sampling frequencies. + */ + DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, + + /** + * An error occurred that resulted in all pending OA reports being lost. + */ + DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, + + DRM_I915_PERF_RECORD_MAX /* non-ABI */ +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index a2e90722a4c4..dd9820b1c779 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -110,6 +110,7 @@ header-y += dlm_netlink.h header-y += dlm_plock.h header-y += dm-ioctl.h header-y += dm-log-userspace.h +header-y += dma-buf.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h @@ -465,6 +466,7 @@ header-y += virtio_console.h header-y += virtio_gpu.h header-y += virtio_ids.h header-y += virtio_input.h +header-y += virtio_mmio.h header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h index 021ed331dd71..744b3d060968 100644 --- a/include/uapi/linux/auto_dev-ioctl.h +++ b/include/uapi/linux/auto_dev-ioctl.h @@ -113,17 +113,13 @@ struct autofs_dev_ioctl { static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) { - memset(in, 0, sizeof(struct autofs_dev_ioctl)); + memset(in, 0, AUTOFS_DEV_IOCTL_SIZE); in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; - in->size = sizeof(struct autofs_dev_ioctl); + in->size = AUTOFS_DEV_IOCTL_SIZE; in->ioctlfd = -1; } -/* - * If you change this make sure you make the corresponding change - * to autofs-dev-ioctl.c:lookup_ioctl() - */ enum { /* Get various version info */ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71, @@ -160,8 +156,6 @@ enum { AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, }; -#define AUTOFS_IOCTL 0x93 - #define AUTOFS_DEV_IOCTL_VERSION \ _IOWR(AUTOFS_IOCTL, \ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl) diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 1bfc3ed8b284..aa63451ef20a 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -61,12 +61,23 @@ struct autofs_packet_expire { char name[NAME_MAX+1]; }; -#define AUTOFS_IOC_READY _IO(0x93, 0x60) -#define AUTOFS_IOC_FAIL _IO(0x93, 0x61) -#define AUTOFS_IOC_CATATONIC _IO(0x93, 0x62) -#define AUTOFS_IOC_PROTOVER _IOR(0x93, 0x63, int) -#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93, 0x64, compat_ulong_t) -#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93, 0x64, unsigned long) -#define AUTOFS_IOC_EXPIRE _IOR(0x93, 0x65, struct autofs_packet_expire) +#define AUTOFS_IOCTL 0x93 + +enum { + AUTOFS_IOC_READY_CMD = 0x60, + AUTOFS_IOC_FAIL_CMD, + AUTOFS_IOC_CATATONIC_CMD, + AUTOFS_IOC_PROTOVER_CMD, + AUTOFS_IOC_SETTIMEOUT_CMD, + AUTOFS_IOC_EXPIRE_CMD, +}; + +#define AUTOFS_IOC_READY _IO(AUTOFS_IOCTL, AUTOFS_IOC_READY_CMD) +#define AUTOFS_IOC_FAIL _IO(AUTOFS_IOCTL, AUTOFS_IOC_FAIL_CMD) +#define AUTOFS_IOC_CATATONIC _IO(AUTOFS_IOCTL, AUTOFS_IOC_CATATONIC_CMD) +#define AUTOFS_IOC_PROTOVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOVER_CMD, int) +#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, compat_ulong_t) +#define AUTOFS_IOC_SETTIMEOUT _IOWR(AUTOFS_IOCTL, AUTOFS_IOC_SETTIMEOUT_CMD, unsigned long) +#define AUTOFS_IOC_EXPIRE _IOR(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_CMD, struct autofs_packet_expire) #endif /* _UAPI_LINUX_AUTO_FS_H */ diff --git a/include/uapi/linux/auto_fs4.h 
b/include/uapi/linux/auto_fs4.h index 8f8f1bdcca8c..7c6da423d54e 100644 --- a/include/uapi/linux/auto_fs4.h +++ b/include/uapi/linux/auto_fs4.h @@ -148,10 +148,16 @@ union autofs_v5_packet_union { autofs_packet_expire_direct_t expire_direct; }; -#define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93, 0x66, int) -#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI -#define AUTOFS_IOC_PROTOSUBVER _IOR(0x93, 0x67, int) -#define AUTOFS_IOC_ASKUMOUNT _IOR(0x93, 0x70, int) +enum { + AUTOFS_IOC_EXPIRE_MULTI_CMD = 0x66, /* AUTOFS_IOC_EXPIRE_CMD + 1 */ + AUTOFS_IOC_PROTOSUBVER_CMD, + AUTOFS_IOC_ASKUMOUNT_CMD = 0x70, /* AUTOFS_DEV_IOCTL_VERSION_CMD - 1 */ +}; + +#define AUTOFS_IOC_EXPIRE_MULTI _IOW(AUTOFS_IOCTL, AUTOFS_IOC_EXPIRE_MULTI_CMD, int) +#define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI +#define AUTOFS_IOC_PROTOSUBVER _IOR(AUTOFS_IOCTL, AUTOFS_IOC_PROTOSUBVER_CMD, int) +#define AUTOFS_IOC_ASKUMOUNT _IOR(AUTOFS_IOCTL, AUTOFS_IOC_ASKUMOUNT_CMD, int) #endif /* _LINUX_AUTO_FS4_H */ diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index beed138bd359..813afd6eee71 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -63,5 +63,10 @@ #define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */ #define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */ +#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */ +#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */ +#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ +#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ + #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 36da93fbf188..048a85e9f017 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -132,6 +132,7 @@ struct inodes_stat_t { #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ /* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) #define MS_NOREMOTELOCK (1<<27) #define MS_NOSEC (1<<28) #define MS_BORN (1<<29) diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index 1158a043342a..259617a551f2 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -24,6 +24,10 @@ #include <linux/socket.h> /* for "struct sockaddr" et al */ #include <linux/compiler.h> /* for "__user" et al */ +#ifndef __KERNEL__ +#include <sys/socket.h> /* for struct sockaddr. 
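One point worth checking about the autofs ioctl renumbering above: because the new *_CMD enum values equal the old literal numbers, every macro still expands to the same ioctl request code, so existing binaries keep working. A small compile-and-run sanity check, assuming the updated header is installed:

#include <assert.h>
#include <sys/ioctl.h>
#include <linux/auto_fs.h>

int main(void)
{
	/* The old definitions were _IO(0x93, 0x60) and _IOR(0x93, 0x63, int). */
	assert(AUTOFS_IOC_READY == _IO(0x93, 0x60));
	assert(AUTOFS_IOC_PROTOVER == _IOR(0x93, 0x63, int));
	return 0;
}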
*/ +#endif + #if __UAPI_DEF_IF_IFNAMSIZ #define IFNAMSIZ 16 #endif /* __UAPI_DEF_IF_IFNAMSIZ */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 3e5185e9ef03..5bc9bfd816b7 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -93,6 +93,7 @@ #define ETH_P_NCSI 0x88F8 /* NCSI protocol */ #define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */ #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_IBOE 0x8915 /* Infiniband over Ethernet */ #define ETH_P_TDLS 0x890D /* TDLS */ #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */ diff --git a/include/uapi/linux/ip6_tunnel.h b/include/uapi/linux/ip6_tunnel.h index 48af63c9a48d..425926c467d7 100644 --- a/include/uapi/linux/ip6_tunnel.h +++ b/include/uapi/linux/ip6_tunnel.h @@ -2,6 +2,8 @@ #define _IP6_TUNNEL_H #include <linux/types.h> +#include <linux/if.h> /* For IFNAMSIZ. */ +#include <linux/in6.h> /* For struct in6_addr. */ #define IPV6_TLV_TNL_ENCAP_LIMIT 4 #define IPV6_DEFAULT_TNL_ENCAP_LIMIT 4 diff --git a/include/uapi/linux/llc.h b/include/uapi/linux/llc.h index 9c987a402473..a6c17f66ee94 100644 --- a/include/uapi/linux/llc.h +++ b/include/uapi/linux/llc.h @@ -14,6 +14,7 @@ #define _UAPI__LINUX_LLC_H #include <linux/socket.h> +#include <linux/if.h> /* For IFHWADDRLEN. */ #define __LLC_SOCK_SIZE__ 16 /* sizeof(sockaddr_llc), word align. */ struct sockaddr_llc { diff --git a/include/uapi/linux/mqueue.h b/include/uapi/linux/mqueue.h index d0a2b8e89813..bbd5116ea739 100644 --- a/include/uapi/linux/mqueue.h +++ b/include/uapi/linux/mqueue.h @@ -18,6 +18,8 @@ #ifndef _LINUX_MQUEUE_H #define _LINUX_MQUEUE_H +#include <linux/types.h> + #define MQ_PRIO_MAX 32768 /* per-uid limit of kernel memory used by mqueue, in bytes */ #define MQ_BYTES_MAX 819200 diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h index 7550e9176a54..c111a91adcc0 100644 --- a/include/uapi/linux/netfilter.h +++ b/include/uapi/linux/netfilter.h @@ -3,7 +3,6 @@ #include <linux/types.h> #include <linux/compiler.h> -#include <linux/sysctl.h> #include <linux/in.h> #include <linux/in6.h> diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 6d074d14ee27..6a8e33dd4ecb 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -82,6 +82,10 @@ enum ip_conntrack_status { IPS_DYING_BIT = 9, IPS_DYING = (1 << IPS_DYING_BIT), + /* Bits that cannot be altered from userland. */ + IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | + IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING), + /* Connection has fixed timeout. 
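The new IPS_UNCHANGEABLE_MASK above exists so that handlers of userland status updates can validate requests in one step. A sketch of the intended check (illustrative only, not code from this patch):

#include <linux/netfilter/nf_conntrack_common.h>

/* Returns nonzero if the requested status transition only touches
 * bits that userland is allowed to alter. */
static int status_change_allowed(unsigned int old_status,
				 unsigned int new_status)
{
	unsigned int changed = old_status ^ new_status;

	return (changed & IPS_UNCHANGEABLE_MASK) == 0;
}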
*/ IPS_FIXED_TIMEOUT_BIT = 10, IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index ae30841ff94e..d42f0396fe30 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -36,7 +36,7 @@ enum nfqnl_vlan_attr { NFQA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */ __NFQA_VLAN_MAX, }; -#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX + 1) +#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1) enum nfqnl_attr_type { NFQA_UNSPEC, diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h index 3efc0ca18345..79da349f1060 100644 --- a/include/uapi/linux/netfilter/xt_hashlimit.h +++ b/include/uapi/linux/netfilter/xt_hashlimit.h @@ -2,6 +2,7 @@ #define _UAPI_XT_HASHLIMIT_H #include <linux/types.h> +#include <linux/limits.h> #include <linux/if.h> /* timings are in milliseconds. */ diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 0df7bd5d2fb1..c3be256107c6 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -32,7 +32,8 @@ #define NFSEXP_ASYNC 0x0010 #define NFSEXP_GATHERED_WRITES 0x0020 #define NFSEXP_NOREADDIRPLUS 0x0040 -/* 80 100 currently unused */ +#define NFSEXP_SECURITY_LABEL 0x0080 +/* 0x100 currently unused */ #define NFSEXP_NOHIDE 0x0200 #define NFSEXP_NOSUBTREECHECK 0x0400 #define NFSEXP_NOAUTHNLM 0x0800 /* Don't authenticate NLM requests - just trust */ @@ -53,7 +54,7 @@ #define NFSEXP_PNFS 0x20000 /* All flags that we claim to support. (Note we don't support NOACL.) */ -#define NFSEXP_ALLFLAGS 0x3FE7F +#define NFSEXP_ALLFLAGS 0x3FEFF /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h index 3af617230d1b..1a3ca79f466b 100644 --- a/include/uapi/linux/nsfs.h +++ b/include/uapi/linux/nsfs.h @@ -6,8 +6,13 @@ #define NSIO 0xb7 /* Returns a file descriptor that refers to an owning user namespace */ -#define NS_GET_USERNS _IO(NSIO, 0x1) +#define NS_GET_USERNS _IO(NSIO, 0x1) /* Returns a file descriptor that refers to a parent namespace */ -#define NS_GET_PARENT _IO(NSIO, 0x2) +#define NS_GET_PARENT _IO(NSIO, 0x2) +/* Returns the type of namespace (CLONE_NEW* value) referred to by + file descriptor */ +#define NS_GET_NSTYPE _IO(NSIO, 0x3) +/* Get owner UID (in the caller's user namespace) for a user namespace */ +#define NS_GET_OWNER_UID _IO(NSIO, 0x4) #endif /* __LINUX_NSFS_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 174d1147081b..634c9c44ed6c 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -682,6 +682,7 @@ #define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */ #define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ +#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ #define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM @@ -973,6 +974,7 @@ #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ +#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */ #define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ @@ -985,4 +987,19 @@ #define PCI_PTM_CTRL_ENABLE 0x00000001 /* 
PTM enable */ #define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */ +/* L1 PM Substates */ +#define PCI_L1SS_CAP 4 /* capability register */ +#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */ +#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ +#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */ +#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */ +#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */ +#define PCI_L1SS_CTL1 8 /* Control Register 1 */ +#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */ +#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */ +#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */ +#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */ +#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F +#define PCI_L1SS_CTL2 0xC /* Control Register 2 */ + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 47c03ca5c404..198892b95f09 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -195,14 +195,14 @@ enum rds_message_rxpath_latency { }; struct rds_rx_trace_so { - u8 rx_traces; - u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; + __u8 rx_traces; + __u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; }; struct rds_cmsg_rx_trace { - u8 rx_traces; - u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; - u64 rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX]; + __u8 rx_traces; + __u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX]; + __u64 rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX]; }; /* diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h new file mode 100644 index 000000000000..dedc226e0d3f --- /dev/null +++ b/include/uapi/linux/rpmsg.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UAPI_RPMSG_H_ +#define _UAPI_RPMSG_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct rpmsg_endpoint_info - endpoint info representation + * @name: name of service + * @src: local address + * @dst: destination address + */ +struct rpmsg_endpoint_info { + char name[32]; + __u32 src; + __u32 dst; +}; + +#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) +#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) + +#endif diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h new file mode 100644 index 000000000000..307acbc82d80 --- /dev/null +++ b/include/uapi/linux/sched/types.h @@ -0,0 +1,74 @@ +#ifndef _UAPI_LINUX_SCHED_TYPES_H +#define _UAPI_LINUX_SCHED_TYPES_H + +#include <linux/types.h> + +struct sched_param { + int sched_priority; +}; + +#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ + +/* + * Extended scheduling parameters data structure. + * + * This is needed because the original struct sched_param can not be + * altered without introducing ABI issues with legacy applications + * (e.g., in sched_getparam()). + * + * However, the possibility of specifying more than just a priority for + * the tasks may be useful for a wide variety of application fields, e.g., + * multimedia, streaming, automation and control, and many others. 
+ * + * This variant (sched_attr) is meant to describe a so-called + * sporadic time-constrained task. In such a model, a task is specified by: + * - the activation period or minimum instance inter-arrival time; + * - the maximum (or average, depending on the actual scheduling + * discipline) computation time of all instances, a.k.a. runtime; + * - the deadline (relative to the actual activation time) of each + * instance. + * Very briefly, a periodic (sporadic) task asks for the execution of + * some specific computation --which is typically called an instance-- + * (at most) every period. Moreover, each instance typically lasts no more + * than the runtime and must be completed by time instant t equal to + * the instance activation time + the deadline. + * + * This is reflected by the actual fields of the sched_attr structure: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_flags for customizing the scheduler behaviour + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * @sched_deadline representative of the task's deadline + * @sched_runtime representative of the task's runtime + * @sched_period representative of the task's period + * + * Given this task model, there is a multiplicity of scheduling algorithms + * and policies that can be used to ensure all the tasks meet their + * timing constraints. + * + * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the + * only user of this new interface. More information about the algorithm + * is available in the scheduling class file or in Documentation/. + */ +struct sched_attr { + u32 size; + + u32 sched_policy; + u64 sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + s32 sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + u32 sched_priority; + + /* SCHED_DEADLINE */ + u64 sched_runtime; + u64 sched_deadline; + u64 sched_period; +}; + +#endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h index 61df8d392f41..7278511d339e 100644 --- a/include/uapi/linux/seg6.h +++ b/include/uapi/linux/seg6.h @@ -15,6 +15,7 @@ #define _UAPI_LINUX_SEG6_H #include <linux/types.h> +#include <linux/in6.h> /* For struct in6_addr. */ /* * SRH diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 7a7183d4062a..b6e5a0a1afd7 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -14,6 +14,8 @@ #ifndef _UAPI_LINUX_SEG6_IPTUNNEL_H #define _UAPI_LINUX_SEG6_IPTUNNEL_H +#include <linux/seg6.h> /* For struct ipv6_sr_hdr.
*/ + enum { SEG6_IPTUNNEL_UNSPEC, SEG6_IPTUNNEL_SRH, diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index f2447a83ac8d..ccd0ccd00f47 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -17,9 +17,10 @@ /* * bit masks for use in "interrupt" flags (3rd argument) */ -#define SERIO_TIMEOUT 1 -#define SERIO_PARITY 2 -#define SERIO_FRAME 4 +#define SERIO_TIMEOUT BIT(0) +#define SERIO_PARITY BIT(1) +#define SERIO_FRAME BIT(2) +#define SERIO_OOB_DATA BIT(3) /* * Serio types */ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 7fec7e36d921..51a6b86e3700 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -1,6 +1,7 @@ #ifndef _UAPI_LINUX_STAT_H #define _UAPI_LINUX_STAT_H +#include <linux/types.h> #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) @@ -41,5 +42,135 @@ #endif +/* + * Timestamp structure for the timestamps in struct statx. + * + * tv_sec holds the number of seconds before (negative) or after (positive) + * 00:00:00 1st January 1970 UTC. + * + * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is + * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. + * + * Note that if both tv_sec and tv_nsec are non-zero, then the two values must + * either be both positive or both negative. + * + * __reserved is held in case we need a yet finer resolution. + */ +struct statx_timestamp { + __s64 tv_sec; + __s32 tv_nsec; + __s32 __reserved; +}; + +/* + * Structures for the extended file attribute retrieval system call + * (statx()). + * + * The caller passes a mask of what they're specifically interested in as a + * parameter to statx(). What statx() actually got will be indicated in + * stx_mask upon return. + * + * For each bit in the mask argument: + * + * - if the datum is not supported: + * + * - the bit will be cleared, and + * + * - the datum will be set to an appropriate fabricated value if one is + * available (e.g. CIFS can take a default uid and gid), otherwise + * + * - the field will be cleared; + * + * - otherwise, if explicitly requested: + * + * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is + * set or if the datum is considered out of date, and + * + * - the field will be filled in and the bit will be set; + * + * - otherwise, if not requested, but available in approximate form without any + * effort, it will be filled in anyway, and the bit will be set upon return + * (it might not be up to date, however, and no attempt will be made to + * synchronise the internal state first); + * + * - otherwise the field and the bit will be cleared before returning. + * + * Items in STATX_BASIC_STATS may be marked unavailable on return, but they + * will have values installed for compatibility purposes so that stat() and + * co. can be emulated in userspace.
+ */ +struct statx { + /* 0x00 */ + __u32 stx_mask; /* What results were written [uncond] */ + __u32 stx_blksize; /* Preferred general I/O size [uncond] */ + __u64 stx_attributes; /* Flags conveying information about the file [uncond] */ + /* 0x10 */ + __u32 stx_nlink; /* Number of hard links */ + __u32 stx_uid; /* User ID of owner */ + __u32 stx_gid; /* Group ID of owner */ + __u16 stx_mode; /* File mode */ + __u16 __spare0[1]; + /* 0x20 */ + __u64 stx_ino; /* Inode number */ + __u64 stx_size; /* File size */ + __u64 stx_blocks; /* Number of 512-byte blocks allocated */ + __u64 __spare1[1]; + /* 0x40 */ + struct statx_timestamp stx_atime; /* Last access time */ + struct statx_timestamp stx_btime; /* File creation time */ + struct statx_timestamp stx_ctime; /* Last attribute change time */ + struct statx_timestamp stx_mtime; /* Last data modification time */ + /* 0x80 */ + __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */ + __u32 stx_rdev_minor; + __u32 stx_dev_major; /* ID of device containing file [uncond] */ + __u32 stx_dev_minor; + /* 0x90 */ + __u64 __spare2[14]; /* Spare space for future expansion */ + /* 0x100 */ +}; + +/* + * Flags to be found in stx_mask + * + * Query request/result mask for statx() and struct statx::stx_mask. + * + * These bits should be set in the mask argument of statx() to request + * particular items when calling statx(). + */ +#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */ +#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */ +#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */ +#define STATX_UID 0x00000008U /* Want/got stx_uid */ +#define STATX_GID 0x00000010U /* Want/got stx_gid */ +#define STATX_ATIME 0x00000020U /* Want/got stx_atime */ +#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */ +#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */ +#define STATX_INO 0x00000100U /* Want/got stx_ino */ +#define STATX_SIZE 0x00000200U /* Want/got stx_size */ +#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ +#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ +#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_ALL 0x00000fffU /* All currently supported flags */ + +/* + * Attributes to be found in stx_attributes + * + * These give information about the features or the state of a file that might + * be of use to ordinary userspace programs such as GUIs or ls rather than + * specialised tools. + * + * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS + * semantically. Where possible, the numerical value is picked to correspond + * also.
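A short usage sketch may help tie the struct and mask together. glibc had no statx() wrapper when this interface landed, so the raw syscall is used; __NR_statx is assumed to be provided by the installed kernel headers, and a flags value of 0 corresponds to AT_STATX_SYNC_AS_STAT.

#include <stdio.h>
#include <fcntl.h>	/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>

int print_birth_time(const char *path)
{
	struct statx stx;
	/* flags == 0 is AT_STATX_SYNC_AS_STAT; ask only for the birth time. */
	long ret = syscall(__NR_statx, AT_FDCWD, path, 0, STATX_BTIME, &stx);

	if (ret)
		return -1;
	/* The kernel clears mask bits it could not fill in. */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("creation time not available on this filesystem\n");
	return 0;
}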
+ */ +#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */ +#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */ +#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */ +#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ +#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ + +#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ + #endif /* _UAPI_LINUX_STAT_H */ diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index c506cddb8165..af17b4154ef6 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -105,26 +105,26 @@ struct tcmu_cmd_entry { union { struct { - uint32_t iov_cnt; - uint32_t iov_bidi_cnt; - uint32_t iov_dif_cnt; - uint64_t cdb_off; - uint64_t __pad1; - uint64_t __pad2; + __u32 iov_cnt; + __u32 iov_bidi_cnt; + __u32 iov_dif_cnt; + __u64 cdb_off; + __u64 __pad1; + __u64 __pad2; struct iovec iov[0]; } req; struct { - uint8_t scsi_status; - uint8_t __pad1; - uint16_t __pad2; - uint32_t __pad3; + __u8 scsi_status; + __u8 __pad1; + __u16 __pad2; + __u32 __pad3; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; } __packed; -#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t) +#define TCMU_OP_ALIGN_SIZE sizeof(__u64) enum tcmu_genl_cmd { TCMU_CMD_UNSPEC, diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 9057d7af3ae1..c055947c5c98 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,13 +11,20 @@ #include <linux/types.h> -#define UFFD_API ((__u64)0xAA) /* - * After implementing the respective features it will become: - * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ - * UFFD_FEATURE_EVENT_FORK) + * If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and + * UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In + * userfaultfd.h we assumed the kernel was reading (whereas _IOC_READ + * means that userland is reading). */ -#define UFFD_API_FEATURES (0) +#define UFFD_API ((__u64)0xAA) +#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ + UFFD_FEATURE_EVENT_FORK | \ + UFFD_FEATURE_EVENT_REMAP | \ + UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_UNMAP | \ + UFFD_FEATURE_MISSING_HUGETLBFS | \ + UFFD_FEATURE_MISSING_SHMEM) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -26,6 +33,9 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE) +#define UFFD_API_RANGE_IOCTLS_BASIC \ + ((__u64)1 << _UFFDIO_WAKE | \ + (__u64)1 << _UFFDIO_COPY) /* * Valid ioctl command number range with this API is from 0x00 to @@ -72,6 +82,21 @@ struct uffd_msg { } pagefault; struct { + __u32 ufd; + } fork; + + struct { + __u64 from; + __u64 to; + __u64 len; + } remap; + + struct { + __u64 start; + __u64 end; + } remove; + + struct { /* unused reserved fields */ __u64 reserved1; __u64 reserved2; @@ -84,9 +109,11 @@ struct uffd_msg { * Start at 0x12 and not at 0 to be more strict against bugs.
*/ #define UFFD_EVENT_PAGEFAULT 0x12 -#if 0 /* not available yet */ #define UFFD_EVENT_FORK 0x13 -#endif +#define UFFD_EVENT_REMAP 0x14 +#define UFFD_EVENT_REMOVE 0x15 +#define UFFD_EVENT_UNMAP 0x16 +#define UFFD_EVENT_EXIT 0x17 /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ @@ -104,11 +131,39 @@ struct uffdio_api { * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE * are to be considered implicitly always enabled in all kernels as * long as the uffdio_api.api requested matches UFFD_API. + * + * UFFD_FEATURE_MISSING_HUGETLBFS means a UFFDIO_REGISTER + * with UFFDIO_REGISTER_MODE_MISSING mode will succeed on + * hugetlbfs virtual memory ranges. Adding or not adding + * UFFD_FEATURE_MISSING_HUGETLBFS to uffdio_api.features has + * no real functional effect after UFFDIO_API returns; + * it is only useful for an initial feature set probe at + * UFFDIO_API time. There are two ways to use it: + * + * 1) by adding UFFD_FEATURE_MISSING_HUGETLBFS to the + * uffdio_api.features before calling UFFDIO_API, an error + * will be returned by UFFDIO_API on a kernel without + * hugetlbfs missing support + * + * 2) UFFD_FEATURE_MISSING_HUGETLBFS can be left out of + * uffdio_api.features, and instead it will be set by the + * kernel in uffdio_api.features if the kernel supports + * it, so userland can later check whether the feature flag is + * present in uffdio_api.features after UFFDIO_API + * succeeded. + * + * UFFD_FEATURE_MISSING_SHMEM works the same as + * UFFD_FEATURE_MISSING_HUGETLBFS, but it applies to shmem + * (i.e. tmpfs and other shmem based APIs). */ -#if 0 /* not available yet */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) -#endif +#define UFFD_FEATURE_EVENT_REMAP (1<<2) +#define UFFD_FEATURE_EVENT_REMOVE (1<<3) +#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) +#define UFFD_FEATURE_MISSING_SHMEM (1<<5) +#define UFFD_FEATURE_EVENT_UNMAP (1<<6) +#define UFFD_FEATURE_EVENT_EXIT (1<<7) __u64 features; __u64 ioctls; diff --git a/include/linux/virtio_mmio.h b/include/uapi/linux/virtio_mmio.h index c4b09689ab64..c4b09689ab64 100644 --- a/include/linux/virtio_mmio.h +++ b/include/uapi/linux/virtio_mmio.h diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 90007a1abcab..15b4385a2be1 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild index bb68cb1b04ed..1e0af1ff75c3 100644 --- a/include/uapi/rdma/Kbuild +++ b/include/uapi/rdma/Kbuild @@ -1,5 +1,6 @@ # UAPI Header export list header-y += ib_user_cm.h +header-y += rdma_user_ioctl.h header-y += ib_user_mad.h header-y += ib_user_sa.h header-y += ib_user_verbs.h diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h new file mode 100644 index 000000000000..e2c8a3f0ccec --- /dev/null +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -0,0 +1,89 @@ +/* + * Broadcom NetXtreme-E RoCE driver. + * + * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.
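A sketch of the probing pattern the userfaultfd comment above describes, assuming a kernel with this patch: requesting UFFD_FEATURE_EVENT_FORK up front makes UFFDIO_API fail on kernels that lack it, which is exactly the probe behaviour documented as case 1).

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

/* Returns a userfaultfd with fork events enabled, or -1 if the
 * running kernel does not support them. */
int open_uffd_with_fork_events(void)
{
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_EVENT_FORK,
	};
	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (ioctl(fd, UFFDIO_API, &api) < 0) {
		close(fd);	/* feature probe failed: kernel too old */
		return -1;
	}
	return fd;
}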
The term + * Broadcom refers to Broadcom Limited and/or its subsidiaries. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Description: Uverbs ABI header file + */ + +#ifndef __BNXT_RE_UVERBS_ABI_H__ +#define __BNXT_RE_UVERBS_ABI_H__ + +#define BNXT_RE_ABI_VERSION 1 + +struct bnxt_re_uctx_resp { + __u32 dev_id; + __u32 max_qp; + __u32 pg_size; + __u32 cqe_sz; + __u32 max_cqd; + __u32 rsvd; +}; + +struct bnxt_re_pd_resp { + __u32 pdid; + __u32 dpi; + __u64 dbr; +}; + +struct bnxt_re_cq_req { + __u64 cq_va; + __u64 cq_handle; +}; + +struct bnxt_re_cq_resp { + __u32 cqid; + __u32 tail; + __u32 phase; + __u32 rsvd; +}; + +struct bnxt_re_qp_req { + __u64 qpsva; + __u64 qprva; + __u64 qp_handle; +}; + +struct bnxt_re_qp_resp { + __u32 qpid; + __u32 rsvd; +}; + +enum bnxt_re_shpg_offt { + BNXT_RE_BEG_RESV_OFFT = 0x00, + BNXT_RE_AVID_OFFT = 0x10, + BNXT_RE_AVID_SIZE = 0x04, + BNXT_RE_END_RESV_OFFT = 0xFF0 +}; + +#endif /* __BNXT_RE_UVERBS_ABI_H__*/ diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild index ef23c294fc71..b65b0b3a5f63 100644 --- a/include/uapi/rdma/hfi/Kbuild +++ b/include/uapi/rdma/hfi/Kbuild @@ -1,2 +1,3 @@ # UAPI Header export list header-y += hfi1_user.h +header-y += hfi1_ioctl.h diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h new file mode 100644 index 000000000000..4791cc8cb09b --- /dev/null +++ b/include/uapi/rdma/hfi/hfi1_ioctl.h @@ -0,0 +1,173 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _LINUX__HFI1_IOCTL_H +#define _LINUX__HFI1_IOCTL_H +#include <linux/types.h> + +/* + * This structure is passed to the driver to tell it where + * user code buffers are, sizes, etc. The offsets and sizes of the + * fields must remain unchanged, for binary compatibility. It can + * be extended, if userversion is changed so user code can tell, if needed + */ +struct hfi1_user_info { + /* + * version of user software, to detect compatibility issues. + * Should be set to HFI1_USER_SWVERSION. + */ + __u32 userversion; + __u32 pad; + /* + * If two or more processes wish to share a context, each process + * must set the subcontext_cnt and subcontext_id to the same + * values. The only restriction on the subcontext_id is that + * it be unique for a given node. + */ + __u16 subctxt_cnt; + __u16 subctxt_id; + /* 128bit UUID passed in by PSM. 
*/ + __u8 uuid[16]; +}; + +struct hfi1_ctxt_info { + __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ + __u32 rcvegr_size; /* size of each eager buffer */ + __u16 num_active; /* number of active units */ + __u16 unit; /* unit (chip) assigned to caller */ + __u16 ctxt; /* ctxt on unit assigned to caller */ + __u16 subctxt; /* subctxt on unit assigned to caller */ + __u16 rcvtids; /* number of Rcv TIDs for this context */ + __u16 credits; /* number of PIO credits for this context */ + __u16 numa_node; /* NUMA node of the assigned device */ + __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */ + __u16 send_ctxt; /* send context in use by this user context */ + __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */ + __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */ + __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */ + __u16 sdma_ring_size; /* number of entries in SDMA request ring */ +}; + +struct hfi1_tid_info { + /* virtual address of first page in transfer */ + __u64 vaddr; + /* pointer to tid array. this array is big enough */ + __u64 tidlist; + /* number of tids programmed by this request */ + __u32 tidcnt; + /* length of transfer buffer programmed by this request */ + __u32 length; +}; + +/* + * This structure is returned by the driver immediately after + * open to get implementation-specific info, and info specific to this + * instance. + * + * This struct must have explicit pad fields where type sizes + * may result in different alignments between 32 and 64 bit + * programs, since the 64 bit * bit kernel requires the user code + * to have matching offsets + */ +struct hfi1_base_info { + /* version of hardware, for feature checking. */ + __u32 hw_version; + /* version of software, for feature checking. */ + __u32 sw_version; + /* Job key */ + __u16 jkey; + __u16 padding1; + /* + * The special QP (queue pair) value that identifies PSM + * protocol packet from standard IB packets. + */ + __u32 bthqp; + /* PIO credit return address, */ + __u64 sc_credits_addr; + /* + * Base address of write-only pio buffers for this process. + * Each buffer has sendpio_credits*64 bytes. + */ + __u64 pio_bufbase_sop; + /* + * Base address of write-only pio buffers for this process. + * Each buffer has sendpio_credits*64 bytes. + */ + __u64 pio_bufbase; + /* address where receive buffer queue is mapped into */ + __u64 rcvhdr_bufbase; + /* base address of Eager receive buffers. */ + __u64 rcvegr_bufbase; + /* base address of SDMA completion ring */ + __u64 sdma_comp_bufbase; + /* + * User register base for init code, not to be used directly by + * protocol or applications. Always maps real chip register space. + * the register addresses are: + * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, + * ur_rcvtidflow + */ + __u64 user_regbase; + /* notification events */ + __u64 events_bufbase; + /* status page */ + __u64 status_bufbase; + /* rcvhdrtail update */ + __u64 rcvhdrtail_base; + /* + * shared memory pages for subctxts if ctxt is shared; these cover + * all the processes in the group sharing a single context. + * all have enough space for the num_subcontexts value on this job. 
+ */ + __u64 subctxt_uregbase; + __u64 subctxt_rcvegrbuf; + __u64 subctxt_rcvhdrbuf; +}; +#endif /* _LINUX__HFI1_IOCTL_H */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 587b7360e820..3f4ee93ae5eb 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -57,6 +57,7 @@ #define _LINUX__HFI1_USER_H #include <linux/types.h> +#include <rdma/rdma_user_ioctl.h> /* * This version number is given to the driver by the user code during @@ -112,61 +113,6 @@ #define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) #define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2) -/* User commands. */ -#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */ -#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */ -#define HFI1_CMD_USER_INFO 3 /* set up userspace */ -#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */ -#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */ -#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */ - -#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */ -#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */ -#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */ -#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ -#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ -#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */ -#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */ - -/* - * User IOCTLs can not go above 128 if they do then see common.h and change the - * base for the snoop ioctl - */ -#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */ - -/* - * Make the ioctls occupy the last 0xf0-0xff portion of the IB range - */ -#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0) - -struct hfi1_cmd; -#define HFI1_IOCTL_ASSIGN_CTXT \ - _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info) -#define HFI1_IOCTL_CTXT_INFO \ - _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info) -#define HFI1_IOCTL_USER_INFO \ - _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info) -#define HFI1_IOCTL_TID_UPDATE \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info) -#define HFI1_IOCTL_TID_FREE \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info) -#define HFI1_IOCTL_CREDIT_UPD \ - _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD)) -#define HFI1_IOCTL_RECV_CTRL \ - _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int) -#define HFI1_IOCTL_POLL_TYPE \ - _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int) -#define HFI1_IOCTL_ACK_EVENT \ - _IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long) -#define HFI1_IOCTL_SET_PKEY \ - _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16) -#define HFI1_IOCTL_CTXT_RESET \ - _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET)) -#define HFI1_IOCTL_TID_INVAL_READ \ - _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info) -#define HFI1_IOCTL_GET_VERS \ - _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int) - #define _HFI1_EVENT_FROZEN_BIT 0 #define _HFI1_EVENT_LINKDOWN_BIT 1 #define _HFI1_EVENT_LID_CHANGE_BIT 2 @@ -211,60 +157,6 @@ struct hfi1_cmd; #define HFI1_POLL_TYPE_ANYRCV 0x0 #define HFI1_POLL_TYPE_URGENT 0x1 -/* - * This structure is passed to the driver to tell it where - * user code buffers are, sizes, etc. The offsets and sizes of the - * fields must remain unchanged, for binary compatibility. It can - * be extended, if userversion is changed so user code can tell, if needed - */ -struct hfi1_user_info { - /* - * version of user software, to detect compatibility issues.
- * Should be set to HFI1_USER_SWVERSION. - */ - __u32 userversion; - __u32 pad; - /* - * If two or more processes wish to share a context, each process - * must set the subcontext_cnt and subcontext_id to the same - * values. The only restriction on the subcontext_id is that - * it be unique for a given node. - */ - __u16 subctxt_cnt; - __u16 subctxt_id; - /* 128bit UUID passed in by PSM. */ - __u8 uuid[16]; -}; - -struct hfi1_ctxt_info { - __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ - __u32 rcvegr_size; /* size of each eager buffer */ - __u16 num_active; /* number of active units */ - __u16 unit; /* unit (chip) assigned to caller */ - __u16 ctxt; /* ctxt on unit assigned to caller */ - __u16 subctxt; /* subctxt on unit assigned to caller */ - __u16 rcvtids; /* number of Rcv TIDs for this context */ - __u16 credits; /* number of PIO credits for this context */ - __u16 numa_node; /* NUMA node of the assigned device */ - __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */ - __u16 send_ctxt; /* send context in use by this user context */ - __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */ - __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */ - __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */ - __u16 sdma_ring_size; /* number of entries in SDMA request ring */ -}; - -struct hfi1_tid_info { - /* virtual address of first page in transfer */ - __u64 vaddr; - /* pointer to tid array. this array is big enough */ - __u64 tidlist; - /* number of tids programmed by this request */ - __u32 tidcnt; - /* length of transfer buffer programmed by this request */ - __u32 length; -}; - enum hfi1_sdma_comp_state { FREE = 0, QUEUED, @@ -289,71 +181,6 @@ struct hfi1_status { char freezemsg[0]; }; -/* - * This structure is returned by the driver immediately after - * open to get implementation-specific info, and info specific to this - * instance. - * - * This struct must have explicit pad fields where type sizes - * may result in different alignments between 32 and 64 bit - * programs, since the 64 bit * bit kernel requires the user code - * to have matching offsets - */ -struct hfi1_base_info { - /* version of hardware, for feature checking. */ - __u32 hw_version; - /* version of software, for feature checking. */ - __u32 sw_version; - /* Job key */ - __u16 jkey; - __u16 padding1; - /* - * The special QP (queue pair) value that identifies PSM - * protocol packet from standard IB packets. - */ - __u32 bthqp; - /* PIO credit return address, */ - __u64 sc_credits_addr; - /* - * Base address of write-only pio buffers for this process. - * Each buffer has sendpio_credits*64 bytes. - */ - __u64 pio_bufbase_sop; - /* - * Base address of write-only pio buffers for this process. - * Each buffer has sendpio_credits*64 bytes. - */ - __u64 pio_bufbase; - /* address where receive buffer queue is mapped into */ - __u64 rcvhdr_bufbase; - /* base address of Eager receive buffers. */ - __u64 rcvegr_bufbase; - /* base address of SDMA completion ring */ - __u64 sdma_comp_bufbase; - /* - * User register base for init code, not to be used directly by - * protocol or applications. Always maps real chip register space. 
- * the register addresses are: - * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, - * ur_rcvtidflow - */ - __u64 user_regbase; - /* notification events */ - __u64 events_bufbase; - /* status page */ - __u64 status_bufbase; - /* rcvhdrtail update */ - __u64 rcvhdrtail_base; - /* - * shared memory pages for subctxts if ctxt is shared; these cover - * all the processes in the group sharing a single context. - * all have enough space for the num_subcontexts value on this job. - */ - __u64 subctxt_uregbase; - __u64 subctxt_rcvegrbuf; - __u64 subctxt_rcvhdrbuf; -}; - enum sdma_req_opcode { EXPECTED = 0, EAGER diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 09f809f323ea..5c7abd859e0f 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -35,7 +35,7 @@ #define IB_USER_MAD_H #include <linux/types.h> -#include <linux/ioctl.h> +#include <rdma/rdma_user_ioctl.h> /* * Increment this value if any changes that break userspace ABI @@ -230,16 +230,4 @@ struct ib_user_mad_reg_req2 { __u8 reserved[3]; }; -#define IB_IOCTL_MAGIC 0x1b - -#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ - struct ib_user_mad_reg_req) - -#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32) - -#define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3) - -#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(IB_IOCTL_MAGIC, 4, \ - struct ib_user_mad_reg_req2) - #endif /* IB_USER_MAD_H */ diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index f4f87cff6dc6..997f904c7692 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -246,7 +246,7 @@ struct ib_uverbs_ex_query_device_resp { __u64 device_cap_flags_ex; struct ib_uverbs_rss_caps rss_caps; __u32 max_wq_type_rq; - __u32 reserved; + __u32 raw_packet_caps; }; struct ib_uverbs_query_port { @@ -934,6 +934,19 @@ struct ib_uverbs_flow_spec_ipv6 { struct ib_uverbs_flow_ipv6_filter mask; }; +struct ib_uverbs_flow_spec_action_tag { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + __u32 tag_id; + __u32 reserved1; +}; + struct ib_uverbs_flow_tunnel_filter { __be32 tunnel_id; }; @@ -1053,6 +1066,8 @@ struct ib_uverbs_ex_create_wq { __u32 cq_handle; __u32 max_wr; __u32 max_sge; + __u32 create_flags; /* Use enum ib_wq_flags */ + __u32 reserved; }; struct ib_uverbs_ex_create_wq_resp { @@ -1081,6 +1096,8 @@ struct ib_uverbs_ex_modify_wq { __u32 wq_handle; __u32 wq_state; __u32 curr_wq_state; + __u32 flags; /* Use enum ib_wq_flags */ + __u32 flags_mask; /* Use enum ib_wq_flags */ }; /* Prevent memory allocation rather than max expected size */ diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h new file mode 100644 index 000000000000..9388125ad51b --- /dev/null +++ b/include/uapi/rdma/rdma_user_ioctl.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2016 Mellanox Technologies, LTD. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef RDMA_USER_IOCTL_H +#define RDMA_USER_IOCTL_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <rdma/ib_user_mad.h> +#include <rdma/hfi/hfi1_ioctl.h> + +/* Documentation/ioctl/ioctl-number.txt */ +#define RDMA_IOCTL_MAGIC 0x1b +/* Legacy name, for user space applications which already use it */ +#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC + +/* + * General block assignments + * It is closed on purpose; do not expose it to user space + * #define MAD_CMD_BASE 0x00 + * #define HFI1_CMD_BASE 0xE0 + */ + +/* MAD specific section */ +#define IB_USER_MAD_REGISTER_AGENT _IOWR(RDMA_IOCTL_MAGIC, 0x01, struct ib_user_mad_reg_req) +#define IB_USER_MAD_UNREGISTER_AGENT _IOW(RDMA_IOCTL_MAGIC, 0x02, __u32) +#define IB_USER_MAD_ENABLE_PKEY _IO(RDMA_IOCTL_MAGIC, 0x03) +#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(RDMA_IOCTL_MAGIC, 0x04, struct ib_user_mad_reg_req2) + +/* HFI specific section */ +/* allocate HFI and context */ +#define HFI1_IOCTL_ASSIGN_CTXT _IOWR(RDMA_IOCTL_MAGIC, 0xE1, struct hfi1_user_info) +/* find out what resources we got */ +#define HFI1_IOCTL_CTXT_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE2, struct hfi1_ctxt_info) +/* set up userspace */ +#define HFI1_IOCTL_USER_INFO _IOW(RDMA_IOCTL_MAGIC, 0xE3, struct hfi1_base_info) +/* update expected TID entries */ +#define HFI1_IOCTL_TID_UPDATE _IOWR(RDMA_IOCTL_MAGIC, 0xE4, struct hfi1_tid_info) +/* free expected TID entries */ +#define HFI1_IOCTL_TID_FREE _IOWR(RDMA_IOCTL_MAGIC, 0xE5, struct hfi1_tid_info) +/* force an update of PIO credit */ +#define HFI1_IOCTL_CREDIT_UPD _IO(RDMA_IOCTL_MAGIC, 0xE6) +/* control receipt of packets */ +#define HFI1_IOCTL_RECV_CTRL _IOW(RDMA_IOCTL_MAGIC, 0xE8, int) +/* set the kind of polling we want */ +#define HFI1_IOCTL_POLL_TYPE _IOW(RDMA_IOCTL_MAGIC, 0xE9, int) +/* ack & clear user status bits */ +#define HFI1_IOCTL_ACK_EVENT _IOW(RDMA_IOCTL_MAGIC, 0xEA, unsigned long) +/* set context's pkey */ +#define HFI1_IOCTL_SET_PKEY _IOW(RDMA_IOCTL_MAGIC, 0xEB, __u16) +/* reset context's HW send context */ +#define HFI1_IOCTL_CTXT_RESET _IO(RDMA_IOCTL_MAGIC, 0xEC) +/* read TID cache invalidations */ +#define HFI1_IOCTL_TID_INVAL_READ _IOWR(RDMA_IOCTL_MAGIC, 0xED, struct hfi1_tid_info) +/* get the version of the user cdev */ +#define HFI1_IOCTL_GET_VERS _IOR(RDMA_IOCTL_MAGIC, 0xEE, int)
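Since the point of this consolidation is binary compatibility, a small sanity check (a sketch, assuming the new header is installed) can confirm that the legacy MAD ioctl numbers are unchanged under the shared magic:

#include <assert.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl.h>

int main(void)
{
	/* The legacy name must still resolve to the same magic byte... */
	assert(IB_IOCTL_MAGIC == RDMA_IOCTL_MAGIC);
	/* ...and the old ib_user_mad.h definition was _IOWR(0x1b, 1, ...). */
	assert(IB_USER_MAD_REGISTER_AGENT ==
	       _IOWR(0x1b, 0x01, struct ib_user_mad_reg_req));
	return 0;
}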
+ +#endif /* RDMA_USER_IOCTL_H */ diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index 0098a522d9f4..ef8e2a8ad0af 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h @@ -89,6 +89,8 @@ #define VIDCON0_ENVID_F (1 << 0) /* VIDOUTCON0 */ +#define VIDOUT_INTERLACE_FIELD_F (1 << 29) +#define VIDOUT_INTERLACE_EN_F (1 << 28) #define VIDOUT_LCD_ON (1 << 24) #define VIDOUT_IF_F_MASK (0x3 << 20) #define VIDOUT_RGB_IF (0x0 << 20) diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 95251512e2c4..44b587b49904 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern struct dma_map_ops *xen_dma_ops; +extern const struct dma_map_ops *xen_dma_ops; #ifdef CONFIG_XEN void __init xen_early_init(void); diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index 56806bc90c2f..7fb7112d667c 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -181,7 +181,7 @@ struct grant_entry_header { }; /* - * Version 2 of the grant entry structure, here is an union because three + * Version 2 of the grant entry structure, here is a union because three * different types are supported: full_page, sub_page and transitive. */ union grant_entry_v2 {