author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-09-16 09:56:27 -0700
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-09-16 09:56:27 -0700
commit | 0898782247ae533d1f4e47a06bc5d4870931b284 (patch)
tree | 21f75cc590542a870f42350b9410fc0588f02b79 | /include/linux
parent | 0c043d70d04711fe6c380df9065fdc44192c49bf (diff)
parent | 410f25de467ee94c1a577c6ee7370c37b376c17c (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.4 merge window.
Diffstat (limited to 'include/linux')
370 files changed, 10129 insertions, 5446 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d315d86844e4..9426b9aaed86 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ +#include <linux/irqdomain.h> #include <linux/resource_ext.h> #include <linux/device.h> #include <linux/property.h> @@ -314,10 +315,19 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *fwnode); +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data); + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else -#define acpi_get_override_irq(gsi, trigger, polarity) (-1) +static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) +{ + return -1; +} #endif /* * This function undoes the effect of one call to acpi_register_gsi(). @@ -367,6 +377,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid, extern acpi_status wmi_remove_notify_handler(const char *guid); extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); extern bool wmi_has_guid(const char *guid); +extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ @@ -913,31 +924,21 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) -int acpi_dev_suspend_late(struct device *dev); int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_suspend_noirq(struct device *dev); -int acpi_subsys_resume_noirq(struct device *dev); -int acpi_subsys_resume_early(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); -int acpi_subsys_freeze_late(struct device *dev); -int acpi_subsys_freeze_noirq(struct device *dev); -int acpi_subsys_thaw_noirq(struct device *dev); +int acpi_subsys_poweroff(struct device *dev); #else -static inline int acpi_dev_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } -static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; } -static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; } +static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } #endif #ifdef CONFIG_ACPI @@ -1303,6 +1304,7 @@ static inline int lpit_read_residency_count_address(u64 *address) #ifdef CONFIG_ACPI_PPTT int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_package(unsigned int cpu); +int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cpu_cache_topology(unsigned int cpu, int level); #else static inline int 
find_acpi_cpu_topology(unsigned int cpu, int level) @@ -1313,6 +1315,10 @@ static inline int find_acpi_cpu_topology_package(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) +{ + return -EINVAL; +} static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) { return -EINVAL; diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index d9bdc1a7f4e7..1cfe05ea1d89 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale); struct sched_domain; static inline -unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +unsigned long topology_get_cpu_scale(int cpu) { return per_cpu(cpu_scale, cpu); } diff --git a/include/linux/audit.h b/include/linux/audit.h index 3a4f2415bb7c..97d0925454df 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -182,6 +182,9 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) } extern u32 audit_enabled; + +extern int audit_signal_info(int sig, struct task_struct *t); + #else /* CONFIG_AUDIT */ static inline __printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, @@ -235,6 +238,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) } #define audit_enabled AUDIT_OFF + +static inline int audit_signal_info(int sig, struct task_struct *t) +{ + return 0; +} + #endif /* CONFIG_AUDIT */ #ifdef CONFIG_AUDIT_COMPAT_GENERIC diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 191621ff7594..ca956b672ac0 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -61,12 +61,14 @@ enum virtchnl_status_code { #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_UNKNOWN = 0, @@ -76,6 +78,8 @@ enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), }; /* for hsplit_0 field of Rx HMC context */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 07e02d6df5ad..6a1a8a314d85 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -203,7 +203,6 @@ struct backing_dev_info { #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; - struct dentry *debug_stats; #endif }; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f9b029180241..35b31d176f74 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -48,6 +48,7 @@ extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; +extern struct workqueue_struct *bdi_async_bio_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 
f31521dcb09a..338aa27e4773 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -64,6 +64,10 @@ extern struct page *balloon_page_alloc(void); extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); +extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, + struct list_head *pages); +extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, + struct list_head *pages, size_t n_req_pages); static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { diff --git a/include/linux/bio.h b/include/linux/bio.h index f87abaa898f0..3cdb84cdc488 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio) return NULL; } -static inline bool bio_full(struct bio *bio) +/** + * bio_full - check if the bio is full + * @bio: bio to check + * @len: length of one segment to be added + * + * Return true if @bio is full and one segment with @len bytes can't be + * added to the bio, otherwise return false + */ +static inline bool bio_full(struct bio *bio, unsigned len) { - return bio->bi_vcnt >= bio->bi_max_vecs; + if (bio->bi_vcnt >= bio->bi_max_vecs) + return true; + + if (bio->bi_iter.bi_size > UINT_MAX - len) + return true; + + return false; } static inline bool bio_next_segment(const struct bio *bio, @@ -408,7 +422,6 @@ static inline void bio_wouldblock_error(struct bio *bio) } struct request_queue; -extern int bio_phys_segments(struct request_queue *, struct bio *); extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); @@ -427,6 +440,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +void bio_release_pages(struct bio *bio, bool mark_dirty); struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct iov_iter *, gfp_t); @@ -444,17 +458,6 @@ void generic_end_io_acct(struct request_queue *q, int op, struct hd_struct *part, unsigned long start_time); -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" -#endif -#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -extern void bio_flush_dcache_pages(struct bio *bi); -#else -static inline void bio_flush_dcache_pages(struct bio *bi) -{ -} -#endif - extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); diff --git a/include/linux/bits.h b/include/linux/bits.h index 2b7b532c1d51..669d69441a62 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -1,13 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BITS_H #define __LINUX_BITS_H + +#include <linux/const.h> #include <asm/bitsperlong.h> -#define BIT(nr) (1UL << (nr)) -#define BIT_ULL(nr) (1ULL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT(nr) (UL(1) << (nr)) +#define BIT_ULL(nr) (ULL(1) << (nr)) +#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG)) #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) 
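(Aside, not part of the patch.) The bits.h hunk here rewrites BIT(), BIT_MASK() and, just below, GENMASK()/GENMASK_ULL() in terms of UL()/ULL() from <linux/const.h>. A standalone userspace sketch, with ULL() and BITS_PER_LONG_LONG stubbed as assumptions, checking that the reworked GENMASK_ULL() still yields the value quoted in its own comment (GENMASK_ULL(39, 21) == 0x000000ffffe00000):

#include <stdio.h>

#define ULL(x)  (x##ULL)            /* userspace stand-in for <linux/const.h> */
#define BITS_PER_LONG_LONG 64       /* stand-in for <asm/bitsperlong.h> */

/* Reworked definition as it appears in the bits.h hunk below. */
#define GENMASK_ULL(h, l) \
	(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
	 (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* Expected output: GENMASK_ULL(39, 21) = 0x000000ffffe00000 */
	printf("GENMASK_ULL(39, 21) = %#018llx\n",
	       (unsigned long long)GENMASK_ULL(39, 21));
	return 0;
}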
#define BITS_PER_BYTE 8 @@ -17,10 +19,11 @@ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ #define GENMASK(h, l) \ - (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + (((~UL(0)) - (UL(1) << (l)) + 1) & \ + (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) #define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ + (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) #endif /* __LINUX_BITS_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 76c61318fda5..12811091fd50 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -63,19 +63,17 @@ struct blkcg { /* * blkg_[rw]stat->aux_cnt is excluded for local stats but included for - * recursive. Used to carry stats of dead children, and, for blkg_rwstat, - * to carry result values from read and sum operations. + * recursive. Used to carry stats of dead children. */ -struct blkg_stat { - struct percpu_counter cpu_cnt; - atomic64_t aux_cnt; -}; - struct blkg_rwstat { struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; atomic64_t aux_cnt[BLKG_RWSTAT_NR]; }; +struct blkg_rwstat_sample { + u64 cnt[BLKG_RWSTAT_NR]; +}; + /* * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a * request_queue (q). This is used by blkcg policies which need to track @@ -134,13 +132,17 @@ struct blkcg_gq { struct blkg_policy_data *pd[BLKCG_MAX_POLS]; - struct rcu_head rcu_head; + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; atomic_t use_delay; atomic64_t delay_nsec; atomic64_t delay_start; u64 last_delay; int last_use; + + struct rcu_head rcu_head; }; typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); @@ -179,6 +181,7 @@ struct blkcg_policy { extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; +extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); @@ -198,6 +201,13 @@ int blkcg_activate_policy(struct request_queue *q, void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol); +static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat, + unsigned int idx) +{ + return atomic64_read(&rwstat->aux_cnt[idx]) + + percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]); +} + const char *blkg_dev_name(struct blkcg_gq *blkg); void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 (*prfill)(struct seq_file *, @@ -206,8 +216,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, bool show_total); u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, - const struct blkg_rwstat *rwstat); -u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); + const struct blkg_rwstat_sample *rwstat); u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, int off); int blkg_print_stat_bytes(struct seq_file *sf, void *v); @@ -215,10 +224,8 @@ int blkg_print_stat_ios(struct seq_file *sf, void *v); int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); -u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, - struct blkcg_policy *pol, int off); -struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, - struct blkcg_policy *pol, int off); +void 
blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, + int off, struct blkg_rwstat_sample *sum); struct blkg_conf_ctx { struct gendisk *disk; @@ -569,69 +576,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) -{ - int ret; - - ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); - if (ret) - return ret; - - atomic64_set(&stat->aux_cnt, 0); - return 0; -} - -static inline void blkg_stat_exit(struct blkg_stat *stat) -{ - percpu_counter_destroy(&stat->cpu_cnt); -} - -/** - * blkg_stat_add - add a value to a blkg_stat - * @stat: target blkg_stat - * @val: value to add - * - * Add @val to @stat. The caller must ensure that IRQ on the same CPU - * don't re-enter this function for the same counter. - */ -static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) -{ - percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); -} - -/** - * blkg_stat_read - read the current value of a blkg_stat - * @stat: blkg_stat to read - */ -static inline uint64_t blkg_stat_read(struct blkg_stat *stat) -{ - return percpu_counter_sum_positive(&stat->cpu_cnt); -} - -/** - * blkg_stat_reset - reset a blkg_stat - * @stat: blkg_stat to reset - */ -static inline void blkg_stat_reset(struct blkg_stat *stat) -{ - percpu_counter_set(&stat->cpu_cnt, 0); - atomic64_set(&stat->aux_cnt, 0); -} - -/** - * blkg_stat_add_aux - add a blkg_stat into another's aux count - * @to: the destination blkg_stat - * @from: the source - * - * Add @from's count including the aux one to @to's aux count. - */ -static inline void blkg_stat_add_aux(struct blkg_stat *to, - struct blkg_stat *from) -{ - atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt), - &to->aux_cnt); -} - static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) { int i, ret; @@ -693,15 +637,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, * * Read the current snapshot of @rwstat and return it in the aux counts. 
*/ -static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) +static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat, + struct blkg_rwstat_sample *result) { - struct blkg_rwstat result; int i; for (i = 0; i < BLKG_RWSTAT_NR; i++) - atomic64_set(&result.aux_cnt[i], - percpu_counter_sum_positive(&rwstat->cpu_cnt[i])); - return result; + result->cnt[i] = + percpu_counter_sum_positive(&rwstat->cpu_cnt[i]); } /** @@ -714,10 +657,10 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) */ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) { - struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); + struct blkg_rwstat_sample tmp = { }; - return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + - atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); + blkg_rwstat_read(rwstat, &tmp); + return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; } /** @@ -763,6 +706,15 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif +bool __blkcg_punt_bio_submit(struct bio *bio); + +static inline bool blkcg_punt_bio_submit(struct bio *bio) +{ + if (bio->bi_opf & REQ_CGROUP_PUNT) + return __blkcg_punt_bio_submit(bio); + else + return false; +} static inline void blkcg_bio_issue_init(struct bio *bio) { @@ -910,6 +862,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 15d1aa53d96c..3fa1fa59f9b2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -306,7 +306,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs bool blk_mq_complete_request(struct request *rq); void blk_mq_complete_request_sync(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, - struct bio *bio); + struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 95202f80676c..1b1fa1557e68 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -154,11 +154,6 @@ struct bio { blk_status_t bi_status; u8 bi_partno; - /* Number of segments in this BIO after - * physical address coalescing is performed. - */ - unsigned int bi_phys_segments; - struct bvec_iter bi_iter; atomic_t __bi_remaining; @@ -210,7 +205,6 @@ struct bio { */ enum { BIO_NO_PAGE_REF, /* don't put release vec pages */ - BIO_SEG_VALID, /* bi_phys_segments valid */ BIO_CLONED, /* doesn't own data */ BIO_BOUNCED, /* bio is a bounce bio */ BIO_USER_MAPPED, /* contains user pages */ @@ -317,6 +311,15 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ + __REQ_NOWAIT_INLINE, /* Return would-block error inline */ + /* + * When a shared kthread needs to issue a bio for a cgroup, doing + * so synchronously can lead to priority inversions as the kthread + * can be trapped waiting for that cgroup. 
CGROUP_PUNT flag makes + * submit_bio() punt the actual issuing to a dedicated per-blkcg + * work item to avoid such priority inversions. + */ + __REQ_CGROUP_PUNT, /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ @@ -343,6 +346,9 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) +#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) +#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) + #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_HIPRI (1ULL << __REQ_HIPRI) @@ -414,12 +420,13 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U +#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE; + return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 592669bcc536..1ef375dafb1c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -137,11 +137,11 @@ struct request { unsigned int cmd_flags; /* op and common flags */ req_flags_t rq_flags; + int tag; int internal_tag; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ - int tag; sector_t __sector; /* sector cursor */ struct bio *bio; @@ -344,10 +344,15 @@ struct queue_limits { #ifdef CONFIG_BLK_DEV_ZONED +/* + * Maximum number of zones to report with a single report zones command. + */ +#define BLK_ZONED_REPORT_MAX_ZONES 8192U + extern unsigned int blkdev_nr_zones(struct block_device *bdev); extern int blkdev_report_zones(struct block_device *bdev, sector_t sector, struct blk_zone *zones, - unsigned int *nr_zones, gfp_t gfp_mask); + unsigned int *nr_zones); extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); extern int blk_revalidate_disk_zones(struct gendisk *disk); @@ -681,7 +686,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q) } } -static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) +static inline sector_t blk_queue_zone_sectors(struct request_queue *q) { return blk_queue_is_zoned(q) ? 
q->limits.chunk_sectors : 0; } @@ -828,7 +833,6 @@ extern void blk_unregister_queue(struct gendisk *disk); extern blk_qc_t generic_make_request(struct bio *bio); extern blk_qc_t direct_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); -extern void blk_init_request_from_bio(struct request *req, struct bio *bio); extern void blk_put_request(struct request *); extern struct request *blk_get_request(struct request_queue *, unsigned int op, blk_mq_req_flags_t flags); @@ -842,7 +846,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern int blk_rq_append_bio(struct request *rq, struct bio **bio); extern void blk_queue_split(struct request_queue *, struct bio **); -extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, unsigned int, void __user *); @@ -867,6 +870,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *, extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); +/* Helper to convert REQ_OP_XXX to its string format XXX */ +extern const char *blk_op_str(unsigned int op); + int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); @@ -1026,21 +1032,9 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); * * blk_update_request() completes given number of bytes and updates * the request without completing it. - * - * blk_end_request() and friends. __blk_end_request() must be called - * with the request queue spinlock acquired. - * - * Several drivers define their own end_request and call - * blk_end_request() for parts of the original function. - * This prevents code duplication in drivers. 
*/ extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void blk_end_request_all(struct request *rq, blk_status_t error); -extern bool __blk_end_request(struct request *rq, blk_status_t error, - unsigned int nr_bytes); -extern void __blk_end_request_all(struct request *rq, blk_status_t error); -extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); @@ -1429,7 +1423,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev) return false; } -static inline unsigned int bdev_zone_sectors(struct block_device *bdev) +static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1684,8 +1678,7 @@ struct block_device_operations { /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, - struct blk_zone *zones, unsigned int *nr_zones, - gfp_t gfp_mask); + struct blk_zone *zones, unsigned int *nr_zones); struct module *owner; const struct pr_ops *pr_ops; }; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index a7f7a98ec39d..169fd25f6bc2 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -6,6 +6,7 @@ #include <linux/errno.h> #include <linux/jump_label.h> #include <linux/percpu.h> +#include <linux/percpu-refcount.h> #include <linux/rbtree.h> #include <uapi/linux/bpf.h> @@ -71,11 +72,17 @@ struct cgroup_bpf { u32 flags[MAX_BPF_ATTACH_TYPE]; /* temp storage for effective prog array used by prog_attach/detach */ - struct bpf_prog_array __rcu *inactive; + struct bpf_prog_array *inactive; + + /* reference counter used to detach bpf programs after cgroup removal */ + struct percpu_ref refcnt; + + /* cgroup_bpf is released using a work queue */ + struct work_struct release_work; }; -void cgroup_bpf_put(struct cgroup *cgrp); int cgroup_bpf_inherit(struct cgroup *cgrp); +void cgroup_bpf_offline(struct cgroup *cgrp); int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); @@ -117,6 +124,14 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, loff_t *ppos, void **new_buf, enum bpf_attach_type type); +int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, + int *optname, char __user *optval, + int *optlen, char **kernel_optval); +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int optname, char __user *optval, + int __user *optlen, int max_optlen, + int retval); + static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { @@ -279,6 +294,38 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, __ret; \ }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ + optname, optval, \ + optlen, \ + kernel_optval); \ + __ret; \ +}) + +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + get_user(__ret, optlen); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ + max_optlen, retval) \ +({ \ + int __ret = retval; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ + 
optname, optval, \ + optlen, max_optlen, \ + retval); \ + __ret; \ +}) + int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, @@ -289,8 +336,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, struct bpf_prog; struct cgroup_bpf {}; -static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, @@ -350,6 +397,11 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ + optlen, max_optlen, retval) ({ retval; }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) ({ 0; }) #define for_each_cgroup_storage_type(stype) for (; false; ) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b92ef9f73e42..18f4cc2c6acd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -63,6 +63,11 @@ struct bpf_map_ops { u64 imm, u32 *off); }; +struct bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + struct bpf_map { /* The first two cachelines with read-mostly members of which some * are also accessed in fast-path (e.g. ops, max_entries). @@ -83,7 +88,7 @@ struct bpf_map { u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; - u32 pages; + struct bpf_map_memory memory; bool unpriv_array; bool frozen; /* write-once */ /* 48 bytes hole */ @@ -91,8 +96,7 @@ struct bpf_map { /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. */ - struct user_struct *user ____cacheline_aligned; - atomic_t refcnt; + atomic_t refcnt ____cacheline_aligned; atomic_t usercnt; struct work_struct work; char name[BPF_OBJ_NAME_LEN]; @@ -273,6 +277,7 @@ enum bpf_reg_type { PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ }; /* The information passed from prog-specific *_is_valid_access @@ -367,6 +372,7 @@ struct bpf_prog_aux { u32 id; u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ + bool verifier_zext; /* Zero extensions has been inserted by verifier. */ bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. 
arch dependent */ @@ -510,17 +516,18 @@ struct bpf_prog_array { }; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_free(struct bpf_prog_array *progs); +int bpf_prog_array_length(struct bpf_prog_array *progs); +bool bpf_prog_array_is_empty(struct bpf_prog_array *array); +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt); -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, +int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, +int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, struct bpf_prog_array **new_array); @@ -548,6 +555,56 @@ _out: \ _ret; \ }) +/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs + * so BPF programs can request cwr for TCP packets. + * + * Current cgroup skb programs can only return 0 or 1 (0 to drop the + * packet. This macro changes the behavior so the low order bit + * indicates whether the packet should be dropped (0) or not (1) + * and the next bit is a congestion notification bit. This could be + * used by TCP to call tcp_enter_cwr() + * + * Hence, new allowed return values of CGROUP EGRESS BPF programs are: + * 0: drop packet + * 1: keep packet + * 2: drop packet and cn + * 3: keep packet and cn + * + * This macro then converts it to one of the NET_XMIT or an error + * code that is then interpreted as drop packet (and no cn): + * 0: NET_XMIT_SUCCESS skb should be transmitted + * 1: NET_XMIT_DROP skb should be dropped and cn + * 2: NET_XMIT_CN skb should be transmitted and cn + * 3: -EPERM skb should be dropped + */ +#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 ret; \ + u32 _ret = 1; \ + u32 _cn = 0; \ + preempt_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + ret = func(_prog, ctx); \ + _ret &= (ret & 1); \ + _cn |= (ret & 2); \ + _item++; \ + } \ + rcu_read_unlock(); \ + preempt_enable(); \ + if (_ret) \ + _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ + else \ + _ret = (_cn ? 
NET_XMIT_DROP : -EPERM); \ + _ret; \ + }) + #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ __BPF_PROG_RUN_ARRAY(array, ctx, func, false) @@ -592,9 +649,12 @@ struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); -int bpf_map_precharge_memlock(u32 pages); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); +int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); +void bpf_map_charge_finish(struct bpf_map_memory *mem); +void bpf_map_charge_move(struct bpf_map_memory *dst, + struct bpf_map_memory *src); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); @@ -992,6 +1052,7 @@ extern const struct bpf_func_proto bpf_spin_unlock_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; extern const struct bpf_func_proto bpf_strtol_proto; extern const struct bpf_func_proto bpf_strtoul_proto; +extern const struct bpf_func_proto bpf_tcp_sock_proto; /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); @@ -1040,6 +1101,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); + +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); + +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); #else static inline bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, @@ -1056,6 +1126,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } +static inline bool bpf_xdp_sock_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + return false; +} + +static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + return 0; +} #endif /* CONFIG_INET */ #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5a9975678d6f..eec5aeeeaf92 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -30,6 +30,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) #endif #ifdef CONFIG_BPF_LIRC_MODE2 BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 519aafabc40c..5fe99f322b1c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -33,9 +33,11 @@ */ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ - REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ - REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ - REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */ + REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ + REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit 
content matters */ + REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, + REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ + REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { @@ -128,7 +130,14 @@ struct bpf_reg_state { * pointing to bpf_func_state. */ u32 frameno; + /* Tracks subreg definition. The stored value is the insn_idx of the + * writing insn. This is safe because subreg_def is used before any insn + * patching which only happens after main verification finished. + */ + s32 subreg_def; enum bpf_reg_liveness live; + /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ + bool precise; }; enum bpf_stack_slot_type { @@ -180,13 +189,77 @@ struct bpf_func_state { struct bpf_stack_state *stack; }; +struct bpf_idx_pair { + u32 prev_idx; + u32 idx; +}; + #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; + struct bpf_verifier_state *parent; + /* + * 'branches' field is the number of branches left to explore: + * 0 - all possible paths from this state reached bpf_exit or + * were safely pruned + * 1 - at least one path is being explored. + * This state hasn't reached bpf_exit + * 2 - at least two paths are being explored. + * This state is an immediate parent of two children. + * One is fallthrough branch with branches==1 and another + * state is pushed into stack (to be explored later) also with + * branches==1. The parent of this state has branches==1. + * The verifier state tree connected via 'parent' pointer looks like: + * 1 + * 1 + * 2 -> 1 (first 'if' pushed into stack) + * 1 + * 2 -> 1 (second 'if' pushed into stack) + * 1 + * 1 + * 1 bpf_exit. + * + * Once do_check() reaches bpf_exit, it calls update_branch_counts() + * and the verifier state tree will look: + * 1 + * 1 + * 2 -> 1 (first 'if' pushed into stack) + * 1 + * 1 -> 1 (second 'if' pushed into stack) + * 0 + * 0 + * 0 bpf_exit. + * After pop_stack() the do_check() will resume at second 'if'. + * + * If is_state_visited() sees a state with branches > 0 it means + * there is a loop. If such state is exactly equal to the current state + * it's an infinite loop. Note states_equal() checks for states + * equvalency, so two states being 'states_equal' does not mean + * infinite loop. The exact comparison is provided by + * states_maybe_looping() function. It's a stronger pre-check and + * much faster than states_equal(). + * + * This algorithm may not find all possible infinite loops or + * loop iteration count may be too high. + * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. + */ + u32 branches; + u32 insn_idx; u32 curframe; u32 active_spin_lock; bool speculative; + + /* first and last insn idx of this verifier state */ + u32 first_insn_idx; + u32 last_insn_idx; + /* jmp history recorded from first to last. + * backtracking is using it to go from last to first. + * For most states jmp_history_cnt is [0-3]. + * For loops can go up to ~40. 
+ */ + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; }; #define bpf_get_spilled_reg(slot, frame) \ @@ -229,7 +302,9 @@ struct bpf_insn_aux_data { int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ + bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ + bool prune_point; unsigned int orig_idx; /* original instruction index */ }; @@ -299,7 +374,9 @@ struct bpf_verifier_env { } cfg; u32 subprog_cnt; /* number of instructions analyzed by the verifier */ - u32 insn_processed; + u32 prev_insn_processed, insn_processed; + /* number of jmps, calls, exits analyzed so far */ + u32 prev_jmps_processed, jmps_processed; /* total verification time */ u64 verification_time; /* maximum number of verifier states kept in 'branching' instructions */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 70e19bc6cc9f..46b92cd61d0c 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -17,6 +17,8 @@ enum cache_type { CACHE_TYPE_UNIFIED = BIT(2), }; +extern unsigned int coherency_max_size; + /** * struct cacheinfo - represent a cache leaf node * @id: This cache's id. It is unique among caches with the same (type, level). diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 55cb455cfcb0..a5dfbaf2470d 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -170,6 +170,8 @@ struct ccp_aes_engine { enum ccp_aes_mode mode; enum ccp_aes_action action; + u32 authsize; + struct scatterlist *key; u32 key_len; /* In bytes */ diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 65a38c4a02a1..39e6f4c57580 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -211,6 +211,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_MON_STATEFUL_SUB | \ CEPH_FEATURE_CRUSH_TUNABLES5 | \ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \ + CEPH_FEATURE_MSG_ADDR2 | \ CEPH_FEATURE_CEPHX_V2) #define CEPH_FEATURES_REQUIRED_DEFAULT 0 diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 3ac0feaf2b5e..cb21c5cf12c3 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -682,7 +682,7 @@ extern const char *ceph_cap_op_name(int op); /* flags field in client cap messages (version >= 10) */ #define CEPH_CLIENT_CAPS_SYNC (1<<0) #define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1) -#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2); +#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2) /* * caps message, used for capability callbacks, acks, requests, etc. 
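(Aside, not part of the patch.) The ceph_fs.h hunk above quietly drops a stray trailing semicolon from CEPH_CLIENT_CAPS_PENDING_CAPSNAP. A minimal userspace sketch of why that semicolon mattered: with the old definition the macro could not be used inside an expression such as an if-condition.

#include <stdio.h>

#define CEPH_CLIENT_CAPS_SYNC            (1<<0)
#define CEPH_CLIENT_CAPS_NO_CAPSNAP      (1<<1)
/* Old, broken form was "#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2);".
 * The ';' becomes part of the expansion, so the if-condition below would
 * expand to "flags & (1<<2);" and fail to compile.
 */
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)   /* fixed form from the diff */

int main(void)
{
	unsigned int flags = CEPH_CLIENT_CAPS_SYNC |
			     CEPH_CLIENT_CAPS_PENDING_CAPSNAP;

	if (flags & CEPH_CLIENT_CAPS_PENDING_CAPSNAP)
		printf("pending capsnap flag is set\n");
	return 0;
}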
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h index bea6c77d2093..17bc7584d1fe 100644 --- a/include/linux/ceph/cls_lock_client.h +++ b/include/linux/ceph/cls_lock_client.h @@ -52,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc, char *lock_name, u8 *type, char **tag, struct ceph_locker **lockers, u32 *num_lockers); +int ceph_cls_assert_locked(struct ceph_osd_request *req, int which, + char *lock_name, u8 type, char *cookie, char *tag); + #endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h index fa5f9b7f5dbb..cf5e840eec71 100644 --- a/include/linux/ceph/debugfs.h +++ b/include/linux/ceph/debugfs.h @@ -19,9 +19,9 @@ static const struct file_operations name##_fops = { \ }; /* debugfs.c */ -extern int ceph_debugfs_init(void); +extern void ceph_debugfs_init(void); extern void ceph_debugfs_cleanup(void); -extern int ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_init(struct ceph_client *client); extern void ceph_debugfs_client_cleanup(struct ceph_client *client); #endif diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index a6c2a48d42e0..450384fe487c 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -218,18 +218,27 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv, /* * sockaddr_storage <-> ceph_sockaddr */ -static inline void ceph_encode_addr(struct ceph_entity_addr *a) +#define CEPH_ENTITY_ADDR_TYPE_NONE 0 +#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) + +static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = htons(a->in_addr.ss_family); a->in_addr.ss_family = *(__u16 *)&ss_family; + + /* Banner addresses require TYPE_NONE */ + a->type = CEPH_ENTITY_ADDR_TYPE_NONE; } -static inline void ceph_decode_addr(struct ceph_entity_addr *a) +static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; a->in_addr.ss_family = ntohs(ss_family); WARN_ON(a->in_addr.ss_family == 512); + a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY; } +extern int ceph_decode_entity_addr(void **p, void *end, + struct ceph_entity_addr *addr); /* * encoders */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 337d5049ff93..82156da3c650 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -84,11 +84,13 @@ struct ceph_options { #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) /* - * Handle the largest possible rbd object in one message. + * The largest possible rbd data object is 32M. + * The largest possible rbd object map object is 64M. + * * There is no limit on the size of cephfs objects, but it has to obey * rsize and wsize mount options anyway. 
*/ -#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) +#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" @@ -299,10 +301,6 @@ int ceph_wait_for_latest_osdmap(struct ceph_client *client, /* pagevec.c */ extern void ceph_release_page_vector(struct page **pages, int num_pages); - -extern struct page **ceph_get_direct_page_vector(const void __user *data, - int num_pages, - bool write_page); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index 3a4688af7455..b4d134d3312a 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -104,7 +104,6 @@ struct ceph_mon_client { #endif }; -extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); extern int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 2294f963dab7..ad7fe5d10dcd 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -198,9 +198,9 @@ struct ceph_osd_request { bool r_mempool; struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ + struct list_head r_private_item; /* ditto */ void *r_priv; /* ditto */ /* set by submitter */ @@ -389,6 +389,14 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); +#define osd_req_op_data(oreq, whch, typ, fld) \ +({ \ + struct ceph_osd_request *__oreq = (oreq); \ + unsigned int __whch = (whch); \ + BUG_ON(__whch >= __oreq->r_num_ops); \ + &__oreq->r_ops[__whch].typ.fld; \ +}) + extern void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); @@ -497,7 +505,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, const char *class, const char *method, unsigned int flags, struct page *req_page, size_t req_len, - struct page *resp_page, size_t *resp_len); + struct page **resp_pages, size_t *resp_len); extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h index cbd0d24b7148..3486636c0e6e 100644 --- a/include/linux/ceph/striper.h +++ b/include/linux/ceph/striper.h @@ -66,4 +66,6 @@ int ceph_extent_to_file(struct ceph_file_layout *l, struct ceph_file_extent **file_extents, u32 *num_file_extents); +u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size); + #endif diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index b4e766e93f6e..430e219e3aba 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -624,7 +624,7 @@ struct cftype { /* * Control Group subsystem type. 
- * See Documentation/cgroup-v1/cgroups.txt for details + * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0297f930a56e..f6b048902d6c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -131,6 +131,8 @@ void cgroup_free(struct task_struct *p); int cgroup_init_early(void); int cgroup_init(void); +int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v); + /* * Iteration helpers and macros. */ @@ -697,6 +699,7 @@ void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, struct cgroup_subsys_state; struct cgroup; +static inline void css_get(struct cgroup_subsys_state *css) {} static inline void css_put(struct cgroup_subsys_state *css) {} static inline int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t) { return 0; } @@ -934,4 +937,22 @@ static inline bool cgroup_task_frozen(struct task_struct *task) #endif /* !CONFIG_CGROUPS */ +#ifdef CONFIG_CGROUP_BPF +static inline void cgroup_bpf_get(struct cgroup *cgrp) +{ + percpu_ref_get(&cgrp->bpf.refcnt); +} + +static inline void cgroup_bpf_put(struct cgroup *cgrp) +{ + percpu_ref_put(&cgrp->bpf.refcnt); +} + +#else /* CONFIG_CGROUP_BPF */ + +static inline void cgroup_bpf_get(struct cgroup *cgrp) {} +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} + +#endif /* CONFIG_CGROUP_BPF */ + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bb6118f79784..2ae7604783dd 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -9,8 +9,6 @@ #include <linux/of.h> #include <linux/of_clk.h> -#ifdef CONFIG_COMMON_CLK - /* * flags used across common struct clk. these flags should only affect the * top-level framework. custom flags for dealing with hardware specifics @@ -807,7 +805,14 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); +#ifdef CONFIG_COMMON_CLK struct clk_hw *__clk_get_hw(struct clk *clk); +#else +static inline struct clk_hw *__clk_get_hw(struct clk *clk) +{ + return (struct clk_hw *)clk; +} +#endif unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, @@ -867,8 +872,6 @@ static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, */ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate); -struct of_device_id; - struct clk_onecell_data { struct clk **clks; unsigned int clk_num; @@ -879,8 +882,6 @@ struct clk_hw_onecell_data { struct clk_hw *hws[]; }; -extern struct of_device_id __clk_of_table; - #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) /* @@ -904,6 +905,40 @@ extern struct of_device_id __clk_of_table; .ops = _ops, \ }) +#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = (const struct clk_hw*[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +/* + * This macro is intended for drivers to be able to share the otherwise + * individual struct clk_hw[] compound literals created by the compiler + * when using CLK_HW_INIT_HW. It does NOT support multiple parents. 
+ */ +#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parent, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = (const struct clk_parent_data[]) { \ + { .fw_name = _parent }, \ + }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + #define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ (&(struct clk_init_data) { \ .flags = _flags, \ @@ -913,6 +948,24 @@ extern struct of_device_id __clk_of_table; .ops = _ops, \ }) +#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + #define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ (&(struct clk_init_data) { \ .flags = _flags, \ @@ -933,6 +986,43 @@ extern struct of_device_id __clk_of_table; _flags), \ } +#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HW(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +/* + * This macro allows the driver to reuse the _parent array for multiple + * fixed factor clk declarations. + */ +#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HWS(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_FW_NAME(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, @@ -1019,5 +1109,4 @@ static inline int of_clk_detect_critical(struct device_node *np, int index, void clk_gate_restore_context(struct clk_hw *hw); -#endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index c8e3325868bd..853a8f181394 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -329,6 +329,19 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks, */ int __must_check clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks); + +/** + * clk_bulk_get_optional - lookup and obtain a number of references to clock producer + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Behaves the same as clk_bulk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns 0 and + * NULL for a clk for which a clock producer could not be determined. 
+ */ +int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); /** * devm_clk_bulk_get - managed get multiple clk consumers * @dev: device for clock "consumer" @@ -344,6 +357,29 @@ int __must_check clk_bulk_get_all(struct device *dev, int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks); /** + * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: pointer to the clk_bulk_data table of consumer + * + * Behaves the same as devm_clk_bulk_get() except where there is no clock + * producer. In this case, instead of returning -ENOENT, the function returns + * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. + * + * Returns 0 if all clocks specified in clk_bulk_data table are obtained + * successfully or for any clk there was no clk provider available, otherwise + * returns valid IS_ERR() condition containing errno. + * The implementation uses @dev and @clk_bulk_data.id to determine the + * clock consumer, and thereby the clock producer. + * The clock returned is stored in each @clk_bulk_data.clk field. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** * devm_clk_bulk_get_all - managed get multiple clk consumers * @dev: device for clock "consumer" * @clks: pointer to the clk_bulk_data table of consumer @@ -715,6 +751,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, return 0; } +static inline int __must_check clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + static inline int __must_check clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { @@ -738,6 +780,12 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk return 0; } +static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + static inline int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { diff --git a/include/linux/coda.h b/include/linux/coda.h index d30209b9cef8..0ca0c83fdb1c 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h @@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance. 
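(Aside, not part of the patch.) The clk.h hunk above documents the new clk_bulk_get_optional() and devm_clk_bulk_get_optional() helpers, which return 0 and leave the clk pointers NULL when a producer is absent. A hedged sketch of how a driver might consume the devm variant; the device, clock ids and probe helper are hypothetical, and clk_bulk_prepare_enable() is assumed to treat the NULL (optional) clocks as no-ops:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical consumer clocks; the "bus" clock may be absent on some SoCs. */
static struct clk_bulk_data example_clks[] = {
	{ .id = "core" },
	{ .id = "bus" },
};

static int example_probe(struct device *dev)
{
	int ret;

	/* Absent producers are not an error: ret stays 0 and .clk stays NULL. */
	ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(example_clks),
					 example_clks);
	if (ret)
		return ret;

	/* NULL (optional) clocks are simply skipped by the clk core. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(example_clks), example_clks);
}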
#ifndef _CODA_HEADER_ #define _CODA_HEADER_ -#if defined(__linux__) typedef unsigned long long u_quad_t; -#endif + #include <uapi/linux/coda.h> #endif diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h deleted file mode 100644 index 15170954aa2b..000000000000 --- a/include/linux/coda_psdev.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __CODA_PSDEV_H -#define __CODA_PSDEV_H - -#include <linux/backing-dev.h> -#include <linux/mutex.h> -#include <uapi/linux/coda_psdev.h> - -struct kstatfs; - -/* communication pending/processing queues */ -struct venus_comm { - u_long vc_seq; - wait_queue_head_t vc_waitq; /* Venus wait queue */ - struct list_head vc_pending; - struct list_head vc_processing; - int vc_inuse; - struct super_block *vc_sb; - struct mutex vc_mutex; -}; - - -static inline struct venus_comm *coda_vcp(struct super_block *sb) -{ - return (struct venus_comm *)((sb)->s_fs_info); -} - -/* upcalls */ -int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); -int venus_getattr(struct super_block *sb, struct CodaFid *fid, - struct coda_vattr *attr); -int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); -int venus_lookup(struct super_block *sb, struct CodaFid *fid, - const char *name, int length, int *type, - struct CodaFid *resfid); -int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, - kuid_t uid); -int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, - struct file **f); -int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length, - struct CodaFid *newfid, struct coda_vattr *attrs); -int venus_create(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length, int excl, int mode, - struct CodaFid *newfid, struct coda_vattr *attrs) ; -int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length); -int venus_remove(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length); -int venus_readlink(struct super_block *sb, struct CodaFid *fid, - char *buffer, int *length); -int venus_rename(struct super_block *, struct CodaFid *new_fid, - struct CodaFid *old_fid, size_t old_length, - size_t new_length, const char *old_name, - const char *new_name); -int venus_link(struct super_block *sb, struct CodaFid *fid, - struct CodaFid *dirfid, const char *name, int len ); -int venus_symlink(struct super_block *sb, struct CodaFid *fid, - const char *name, int len, const char *symname, int symlen); -int venus_access(struct super_block *sb, struct CodaFid *fid, int mask); -int venus_pioctl(struct super_block *sb, struct CodaFid *fid, - unsigned int cmd, struct PioctlData *data); -int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); -int venus_fsync(struct super_block *sb, struct CodaFid *fid); -int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); - -/* - * Statistics - */ - -extern struct venus_comm coda_comms[]; -#endif diff --git a/include/linux/compat.h b/include/linux/compat.h index ebddcb6cfcf8..16dafd9f4b86 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -138,8 +138,7 @@ typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; -int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, - sigset_t *set, sigset_t *oldset, +int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); struct compat_sigaction { diff --git a/include/linux/compiler-gcc.h 
b/include/linux/compiler-gcc.h index e8579412ad21..d7ee4c6bad48 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -170,3 +170,5 @@ #else #define __diag_GCC_8(s) #endif + +#define __no_fgcse __attribute__((optimize("-fno-gcse"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 8aaf7cd026b0..f0fd5636fddb 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -116,9 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, ".pushsection .discard.unreachable\n\t" \ ".long 999b - .\n\t" \ ".popsection\n\t" + +/* Annotate a C jump table to allow objtool to follow the code flow */ +#define __annotate_jump_table __section(".rodata..c_jump_table") + #else #define annotate_reachable() #define annotate_unreachable() +#define __annotate_jump_table #endif #ifndef ASM_UNREACHABLE diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 19e58b9138a0..599c27b56c29 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -112,6 +112,8 @@ struct ftrace_likely_data { #if defined(CC_USING_HOTPATCH) #define notrace __attribute__((hotpatch(0, 0))) +#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) +#define notrace __attribute__((patchable_function_entry(0, 0))) #else #define notrace __attribute__((__no_instrument_function__)) #endif @@ -187,6 +189,10 @@ struct ftrace_likely_data { #define asm_volatile_goto(x...) asm goto(x) #endif +#ifndef __no_fgcse +# define __no_fgcse +#endif + /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/concap.h b/include/linux/concap.h deleted file mode 100644 index 977acb3d1fb2..000000000000 --- a/include/linux/concap.h +++ /dev/null @@ -1,112 +0,0 @@ -/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $ - * - * Copyright 1997 by Henner Eisen <eis@baty.hanse.de> - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - */ - -#ifndef _LINUX_CONCAP_H -#define _LINUX_CONCAP_H - -#include <linux/skbuff.h> -#include <linux/netdevice.h> - -/* Stuff to support encapsulation protocols genericly. The encapsulation - protocol is processed at the uppermost layer of the network interface. - - Based on a ideas developed in a 'synchronous device' thread in the - linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski - and Jonathan Naylor. - - For more documetation on this refer to Documentation/isdn/README.concap -*/ - -struct concap_proto_ops; -struct concap_device_ops; - -/* this manages all data needed by the encapsulation protocol - */ -struct concap_proto{ - struct net_device *net_dev; /* net device using our service */ - struct concap_device_ops *dops; /* callbacks provided by device */ - struct concap_proto_ops *pops; /* callbacks provided by us */ - spinlock_t lock; - int flags; - void *proto_data; /* protocol specific private data, to - be accessed via *pops methods only*/ - /* - : - whatever - : - */ -}; - -/* Operations to be supported by the net device. Called by the encapsulation - * protocol entity. No receive method is offered because the encapsulation - * protocol directly calls netif_rx(). 
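The two annotations introduced above are aimed at interpreter-style code: __annotate_jump_table puts a computed-goto dispatch table in a section objtool can follow, and __no_fgcse disables GCC's global CSE for a single function (the BPF interpreter is the intended user). A purely illustrative sketch under those assumptions, not code from this patch:

#include <linux/compiler.h>
#include <linux/types.h>

static u64 __no_fgcse interpret(const u8 *prog)
{
	/* Dispatch table lands in .rodata..c_jump_table for objtool. */
	static const void * const jumptable[2] __annotate_jump_table = {
		&&do_halt,
		&&do_inc,
	};
	u64 acc = 0;

	goto *jumptable[prog[0] & 1];
do_inc:
	acc++;
	prog++;
	goto *jumptable[prog[0] & 1];
do_halt:
	return acc;
}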
- */ -struct concap_device_ops{ - - /* to request data is submitted by device*/ - int (*data_req)(struct concap_proto *, struct sk_buff *); - - /* Control methods must be set to NULL by devices which do not - support connection control.*/ - /* to request a connection is set up */ - int (*connect_req)(struct concap_proto *); - - /* to request a connection is released */ - int (*disconn_req)(struct concap_proto *); -}; - -/* Operations to be supported by the encapsulation protocol. Called by - * device driver. - */ -struct concap_proto_ops{ - - /* create a new encapsulation protocol instance of same type */ - struct concap_proto * (*proto_new) (void); - - /* delete encapsulation protocol instance and free all its resources. - cprot may no loger be referenced after calling this */ - void (*proto_del)(struct concap_proto *cprot); - - /* initialize the protocol's data. To be called at interface startup - or when the device driver resets the interface. All services of the - encapsulation protocol may be used after this*/ - int (*restart)(struct concap_proto *cprot, - struct net_device *ndev, - struct concap_device_ops *dops); - - /* inactivate an encapsulation protocol instance. The encapsulation - protocol may not call any *dops methods after this. */ - int (*close)(struct concap_proto *cprot); - - /* process a frame handed down to us by upper layer */ - int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb); - - /* to be called for each data entity received from lower layer*/ - int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb); - - /* to be called when a connection was set up/down. - Protocols that don't process these primitives might fill in - dummy methods here */ - int (*connect_ind)(struct concap_proto *cprot); - int (*disconn_ind)(struct concap_proto *cprot); - /* - Some network device support functions, like net_header(), rebuild_header(), - and others, that depend solely on the encapsulation protocol, might - be provided here, too. The net device would just fill them in its - corresponding fields when it is opened. - */ -}; - -/* dummy restart/close/connect/reset/disconn methods - */ -extern int concap_nop(struct concap_proto *cprot); - -/* dummy submit method - */ -extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb); -#endif diff --git a/include/linux/connector.h b/include/linux/connector.h index 1d72ef76f24f..cb732643471b 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -50,15 +50,75 @@ struct cn_dev { u32 seq, groups; struct sock *nls; - void (*input) (struct sk_buff *skb); struct cn_queue_dev *cbdev; }; +/** + * cn_add_callback() - Registers new callback with connector core. + * + * @id: unique connector's user identifier. + * It must be registered in connector.h for legal + * in-kernel users. + * @name: connector's callback symbolic name. + * @callback: connector's callback. + * parameters are %cn_msg and the sender's credentials + */ int cn_add_callback(struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); -void cn_del_callback(struct cb_id *); +/** + * cn_del_callback() - Unregisters new callback with connector core. + * + * @id: unique connector's user identifier. + */ +void cn_del_callback(struct cb_id *id); + + +/** + * cn_netlink_send_mult - Sends message to the specified groups. + * + * @msg: message header(with attached data). + * @len: Number of @msg to be sent. + * @portid: destination port. 
+ * If non-zero the message will be sent to the given port, + * which should be set to the original sender. + * @group: destination group. + * If @portid and @group is zero, then appropriate group will + * be searched through all registered connector users, and + * message will be delivered to the group which was created + * for user with the same ID as in @msg. + * If @group is not zero, then message will be delivered + * to the specified group. + * @gfp_mask: GFP mask. + * + * It can be safely called from softirq context, but may silently + * fail under strong memory pressure. + * + * If there are no listeners for given group %-ESRCH can be returned. + */ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); + +/** + * cn_netlink_send_mult - Sends message to the specified groups. + * + * @msg: message header(with attached data). + * @portid: destination port. + * If non-zero the message will be sent to the given port, + * which should be set to the original sender. + * @group: destination group. + * If @portid and @group is zero, then appropriate group will + * be searched through all registered connector users, and + * message will be delivered to the group which was created + * for user with the same ID as in @msg. + * If @group is not zero, then message will be delivered + * to the specified group. + * @gfp_mask: GFP mask. + * + * It can be safely called from softirq context, but may silently + * fail under strong memory pressure. + * + * If there are no listeners for given group %-ESRCH can be returned. + */ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index ed798e114663..24d4c16e3ae0 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -168,9 +168,6 @@ extern void vc_SAK(struct work_struct *work); #define CUR_DEFAULT CUR_UNDERLINE -static inline bool con_is_visible(const struct vc_data *vc) -{ - return *vc->vc_display_fg == vc; -} +bool con_is_visible(const struct vc_data *vc); #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 62a520df8add..a2b68823717b 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -91,15 +91,11 @@ union coresight_dev_subtype { /** * struct coresight_platform_data - data harvested from the DT specification - * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. - * @name: name of the component as shown under sysfs. * @nr_inport: number of input ports for this component. * @nr_outport: number of output ports for this component. * @conns: Array of nr_outport connections from this component */ struct coresight_platform_data { - int cpu; - const char *name; int nr_inport; int nr_outport; struct coresight_connection *conns; @@ -110,11 +106,12 @@ struct coresight_platform_data { * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined - by @coresight_ops. + * by @coresight_ops. * @pdata: platform data collected from DT. * @dev: The device entity associated to this component. * @groups: operations specific to this component. These will end up - in the component's sysfs sub-directory. + * in the component's sysfs sub-directory. + * @name: name for the coresight device, also shown under sysfs. 
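A sketch of a kernel-side connector user wiring up the callback interface documented above; the id values and names are hypothetical (real users must reserve their idx/val pair in connector.h, as the kernel-doc notes):

#include <linux/connector.h>
#include <linux/module.h>

static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("connector: seq %u ack %u len %u from portid %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);
}

static int __init example_init(void)
{
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");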
*/ struct coresight_desc { enum coresight_dev_type type; @@ -123,28 +120,27 @@ struct coresight_desc { struct coresight_platform_data *pdata; struct device *dev; const struct attribute_group **groups; + const char *name; }; /** * struct coresight_connection - representation of a single connection * @outport: a connection's output port number. - * @chid_name: remote component's name. * @child_port: remote component's port number @output is connected to. + * @chid_fwnode: remote component's fwnode handle. * @child_dev: a @coresight_device representation of the component connected to @outport. */ struct coresight_connection { int outport; - const char *child_name; int child_port; + struct fwnode_handle *child_fwnode; struct coresight_device *child_dev; }; /** * struct coresight_device - representation of a device as used by the framework - * @conns: array of coresight_connections associated to this component. - * @nr_inport: number of input port associated to this component. - * @nr_outport: number of output port associated to this component. + * @pdata: Platform data with device connections associated to this device. * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined @@ -159,9 +155,7 @@ struct coresight_connection { * @ea: Device attribute for sink representation under PMU directory. */ struct coresight_device { - struct coresight_connection *conns; - int nr_inport; - int nr_outport; + struct coresight_platform_data *pdata; enum coresight_dev_type type; union coresight_dev_subtype subtype; const struct coresight_ops *ops; @@ -174,6 +168,28 @@ struct coresight_device { struct dev_ext_attribute *ea; }; +/* + * coresight_dev_list - Mapping for devices to "name" index for device + * names. + * + * @nr_idx: Number of entries already allocated. + * @pfx: Prefix pattern for device name. + * @fwnode_list: Array of fwnode_handles associated with each allocated + * index, upto nr_idx entries. 
+ */ +struct coresight_dev_list { + int nr_idx; + const char *pfx; + struct fwnode_handle **fwnode_list; +}; + +#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \ +static struct coresight_dev_list (var) = { \ + .pfx = dev_pfx, \ + .nr_idx = 0, \ + .fwnode_list = NULL, \ +} + #define to_coresight_device(d) container_of(d, struct coresight_device, dev) #define source_ops(csdev) csdev->ops->source_ops @@ -267,7 +283,8 @@ extern int coresight_claim_device_unlocked(void __iomem *base); extern void coresight_disclaim_device(void __iomem *base); extern void coresight_disclaim_device_unlocked(void __iomem *base); - +extern char *coresight_alloc_device_name(struct coresight_dev_list *devs, + struct device *dev); #else static inline struct coresight_device * coresight_register(struct coresight_desc *desc) { return NULL; } @@ -292,16 +309,8 @@ static inline void coresight_disclaim_device_unlocked(void __iomem *base) {} #endif -#ifdef CONFIG_OF -extern int of_coresight_get_cpu(const struct device_node *node); -extern struct coresight_platform_data * -of_get_coresight_platform_data(struct device *dev, - const struct device_node *node); -#else -static inline int of_coresight_get_cpu(const struct device_node *node) -{ return 0; } -static inline struct coresight_platform_data *of_get_coresight_platform_data( - struct device *dev, const struct device_node *node) { return NULL; } -#endif +extern int coresight_get_cpu(struct device *dev); + +struct coresight_platform_data *coresight_get_platform_data(struct device *dev); #endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32a1733014f5..536a049d7ecc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -47,11 +47,6 @@ struct cpufreq_cpuinfo { unsigned int transition_latency; }; -struct cpufreq_user_policy { - unsigned int min; /* in kHz */ - unsigned int max; /* in kHz */ -}; - struct cpufreq_policy { /* CPUs sharing clock, require sw coordination */ cpumask_var_t cpus; /* Online CPUs only */ @@ -81,7 +76,8 @@ struct cpufreq_policy { struct work_struct update; /* if update_policy() needs to be * called, but you're in IRQ context */ - struct cpufreq_user_policy user_policy; + struct dev_pm_qos_request *min_freq_req; + struct dev_pm_qos_request *max_freq_req; struct cpufreq_frequency_table *freq_table; enum cpufreq_table_sorting freq_table_sorted; @@ -144,6 +140,9 @@ struct cpufreq_policy { /* Pointer to the cooling device if used for thermal mitigation */ struct thermal_cooling_device *cdev; + + struct notifier_block nb_min; + struct notifier_block nb_max; }; struct cpufreq_freqs { @@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy); +void refresh_frequency_limits(struct cpufreq_policy *policy); void cpufreq_update_policy(unsigned int cpu); void cpufreq_update_limits(unsigned int cpu); bool have_governor_per_policy(void); @@ -406,6 +406,12 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); const char *cpufreq_get_current_driver(void); void *cpufreq_get_driver_data(void); +static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv) +{ + return IS_ENABLED(CONFIG_CPU_THERMAL) && + (drv->flags & CPUFREQ_IS_COOLING_DEV); +} + static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) { @@ -986,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[]; 
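Device naming becomes index-based per component type: DEFINE_CORESIGHT_DEVLIST() declares the per-type bookkeeping and coresight_alloc_device_name() hands out "pfxN" names keyed by fwnode. A rough sketch of the intended probe-time usage; the funnel_* names are illustrative:

#include <linux/coresight.h>
#include <linux/err.h>

static DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");

static int funnel_probe(struct device *dev)
{
	struct coresight_desc desc = { };

	desc.name = coresight_alloc_device_name(&funnel_devs, dev);
	if (!desc.name)
		return -ENOMEM;

	desc.pdata = coresight_get_platform_data(dev);
	if (IS_ERR(desc.pdata))
		return PTR_ERR(desc.pdata);

	desc.dev = dev;
	/* type, subtype, ops and groups would be filled in here. */

	return PTR_ERR_OR_ZERO(coresight_register(&desc));
}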
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); unsigned int cpufreq_generic_get(unsigned int cpu); -int cpufreq_generic_init(struct cpufreq_policy *policy, +void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5c6062206760..068793a619ca 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -116,10 +116,10 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, - CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_TEGRA_TIMER_STARTING, @@ -176,6 +176,7 @@ enum cpuhp_state { CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, diff --git a/include/linux/cred.h b/include/linux/cred.h index 7eb43a038330..f7a30e0099be 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -145,7 +145,11 @@ struct cred { struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ - struct rcu_head rcu; /* RCU deletion hook */ + /* RCU deletion */ + union { + int non_rcu; /* Can we skip RCU deletion? */ + struct rcu_head rcu; /* RCU deletion hook */ + }; } __randomize_layout; extern void __put_cred(struct cred *); @@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred) if (!cred) return cred; validate_creds(cred); + nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); } @@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) if (!atomic_inc_not_zero(&nonconst_cred->usage)) return NULL; validate_creds(cred); + nonconst_cred->non_rcu = 0; return cred; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9cf8f3ce0e50..19ea3a371d7b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -49,7 +49,6 @@ #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d -#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e #define CRYPTO_ALG_TYPE_HASH 0x0000000e #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f @@ -323,6 +322,17 @@ struct cipher_alg { void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; +/** + * struct compress_alg - compression/decompression algorithm + * @coa_compress: Compress a buffer of specified length, storing the resulting + * data in the specified buffer. Return the length of the + * compressed data in dlen. + * @coa_decompress: Decompress the source buffer, storing the uncompressed + * data in the specified buffer. The length of the data is + * returned in dlen. + * + * All fields are mandatory. 
+ */ struct compress_alg { int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); diff --git a/include/linux/dax.h b/include/linux/dax.h index becaea5f4488..9bd8528bd305 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,6 +7,9 @@ #include <linux/radix-tree.h> #include <asm/pgtable.h> +/* Flag for synchronous flush */ +#define DAXDEV_F_SYNC (1UL << 0) + typedef unsigned long dax_entry_t; struct iomap_ops; @@ -38,18 +41,40 @@ extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops); + const struct dax_operations *ops, unsigned long flags); void put_dax(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void dax_write_cache(struct dax_device *dax_dev, bool wc); bool dax_write_cache_enabled(struct dax_device *dax_dev); +bool __dax_synchronous(struct dax_device *dax_dev); +static inline bool dax_synchronous(struct dax_device *dax_dev) +{ + return __dax_synchronous(dax_dev); +} +void __set_dax_synchronous(struct dax_device *dax_dev); +static inline void set_dax_synchronous(struct dax_device *dax_dev) +{ + __set_dax_synchronous(dax_dev); +} +/* + * Check if given mapping is supported by the file / underlying device. + */ +static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, + struct dax_device *dax_dev) +{ + if (!(vma->vm_flags & VM_SYNC)) + return true; + if (!IS_DAX(file_inode(vma->vm_file))) + return false; + return dax_synchronous(dax_dev); +} #else static inline struct dax_device *dax_get_by_host(const char *host) { return NULL; } static inline struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops) + const struct dax_operations *ops, unsigned long flags) { /* * Callers should check IS_ENABLED(CONFIG_DAX) to know if this @@ -70,6 +95,18 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) { return false; } +static inline bool dax_synchronous(struct dax_device *dax_dev) +{ + return true; +} +static inline void set_dax_synchronous(struct dax_device *dax_dev) +{ +} +static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, + struct dax_device *dax_dev) +{ + return !(vma->vm_flags & VM_SYNC); +} #endif struct writeback_control; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f14e587c5d5d..9451011ac014 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -153,7 +153,7 @@ struct dentry_operations { * Locking rules for dentry_operations callbacks are to be found in * Documentation/filesystems/Locking. Keep it updated! * - * FUrther descriptions are found in Documentation/filesystems/vfs.txt. + * FUrther descriptions are found in Documentation/filesystems/vfs.rst. * Keep it updated too! */ @@ -291,7 +291,6 @@ static inline unsigned d_count(const struct dentry *dentry) */ extern __printf(4, 5) char *dynamic_dname(struct dentry *, char *, int, const char *, ...); -extern char *simple_dname(struct dentry *, char *, int); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); @@ -568,7 +567,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. 
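daxdev_mapping_supported() is meant to be called from a filesystem's ->mmap() handler to reject MAP_SYNC mappings whose backing device cannot honor them. A sketch under that assumption; the foo_* helpers stand in for real filesystem plumbing:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical helpers provided by the filesystem. */
extern struct dax_device *foo_sb_dax_dev(struct super_block *sb);
extern const struct vm_operations_struct foo_dax_vm_ops;

static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dax_device *dax_dev = foo_sb_dax_dev(file_inode(file)->i_sb);

	/* MAP_SYNC only makes sense on a synchronous DAX device. */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	vma->vm_ops = &foo_dax_vm_ops;
	return 0;
}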
* - * See also: Documentation/filesystems/vfs.txt + * See also: Documentation/filesystems/vfs.rst */ static inline struct dentry *d_real(struct dentry *dentry, const struct inode *inode) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 3b0ba54cc4d5..58424eb3b329 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -133,9 +133,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); -struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - u32 *array, u32 elements); +void debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, u32 *array, u32 elements); struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, @@ -353,11 +352,10 @@ static inline bool debugfs_initialized(void) return false; } -static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - u32 *array, u32 elements) +static inline void debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, u32 *array, + u32 elements) { - return ERR_PTR(-ENODEV); } static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e1f51d607cc5..399ad8632356 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -95,8 +95,7 @@ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device ** typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector, struct blk_zone *zones, - unsigned int *nr_zones, - gfp_t gfp_mask); + unsigned int *nr_zones); /* * These iteration functions are typically used to check (and combine) @@ -530,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" -#define DM_RATELIMIT(pr_func, fmt, ...) \ -do { \ - static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - \ - if (__ratelimit(&rs)) \ - pr_func(DM_FMT(fmt), ##__VA_ARGS__); \ -} while (0) - #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) -#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__) +#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__) +#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__) +#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #ifdef CONFIG_DM_DEBUG #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__) +#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #else #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/device.h b/include/linux/device.h index 4a295e324ac5..47ccb2029bc3 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -6,7 +6,7 @@ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2008-2009 Novell Inc. * - * See Documentation/driver-model/ for more information. + * See Documentation/driver-api/driver-model/ for more information. */ #ifndef _DEVICE_H_ @@ -42,6 +42,7 @@ struct iommu_ops; struct iommu_group; struct iommu_fwspec; struct dev_pin_info; +struct iommu_param; struct bus_attribute { struct attribute attr; @@ -163,11 +164,13 @@ void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); void subsys_dev_iter_exit(struct subsys_dev_iter *iter); +int device_match_of_node(struct device *dev, const void *np); + int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); struct device *bus_find_device(struct bus_type *bus, struct device *start, - void *data, - int (*match)(struct device *dev, void *data)); + const void *data, + int (*match)(struct device *dev, const void *data)); struct device *bus_find_device_by_name(struct bus_type *bus, struct device *start, const char *name); @@ -259,6 +262,8 @@ enum probe_type { * @resume: Called to bring a device from sleep mode. * @groups: Default attributes that get created by the driver core * automatically. + * @dev_groups: Additional attributes attached to device instance once the + * it is bound to the driver. * @pm: Power management operations of the device which matched * this driver. * @coredump: Called when sysfs entry is written to. The device driver @@ -293,6 +298,7 @@ struct device_driver { int (*suspend) (struct device *dev, pm_message_t state); int (*resume) (struct device *dev); const struct attribute_group **groups; + const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump) (struct device *dev); @@ -336,11 +342,12 @@ extern int __must_check driver_for_each_device(struct device_driver *drv, int (*fn)(struct device *dev, void *)); struct device *driver_find_device(struct device_driver *drv, - struct device *start, void *data, - int (*match)(struct device *dev, void *data)); + struct device *start, const void *data, + int (*match)(struct device *dev, const void *data)); void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); +int driver_deferred_probe_check_state_continue(struct device *dev); /** * struct subsys_interface - interfaces to device functions @@ -911,6 +918,8 @@ struct dev_links_info { * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. + * @lockdep_mutex: An optional debug lock that a subsystem can use as a + * peer lock to gain localized lockdep coverage of the device_lock. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. @@ -961,6 +970,7 @@ struct dev_links_info { * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu_fwspec: IOMMU-specific properties supplied by firmware. + * @iommu_param: Per device generic IOMMU runtime data * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). 
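With the match callbacks constified, generic comparison helpers such as the new device_match_of_node() can be passed straight to bus_find_device(). A small sketch; note the returned device has its reference count raised and needs put_device():

#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct device *find_platform_dev_by_node(struct device_node *np)
{
	/* Caller must put_device() the result when done. */
	return bus_find_device(&platform_bus_type, NULL, np,
			       device_match_of_node);
}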
@@ -993,6 +1003,9 @@ struct device { core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set_drvdata/dev_get_drvdata */ +#ifdef CONFIG_PROVE_LOCKING + struct mutex lockdep_mutex; +#endif struct mutex mutex; /* mutex to synchronize calls to * its driver. */ @@ -1054,6 +1067,7 @@ struct device { void (*release)(struct device *dev); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; + struct iommu_param *iommu_param; bool offline_disabled:1; bool offline:1; @@ -1252,6 +1266,8 @@ extern int device_for_each_child_reverse(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); +extern struct device *device_find_child_by_name(struct device *parent, + const char *name); extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); @@ -1375,6 +1391,7 @@ extern int (*platform_notify_remove)(struct device *dev); */ extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); +extern bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); diff --git a/include/linux/dim.h b/include/linux/dim.h new file mode 100644 index 000000000000..9fa4b3f88c39 --- /dev/null +++ b/include/linux/dim.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef DIM_H +#define DIM_H + +#include <linux/module.h> + +/** + * Number of events between DIM iterations. + * Causes a moderation of the algorithm run. + */ +#define DIM_NEVENTS 64 + +/** + * Is a difference between values justifies taking an action. + * We consider 10% difference as significant. + */ +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100UL * abs((val) - (ref))) / (ref)) > 10) + +/** + * Calculate the gap between two values. + * Take wrap-around and variable size into consideration. + */ +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \ + & (BIT_ULL(bits) - 1)) + +/** + * Structure for CQ moderation values. + * Used for communications between DIM and its consumer. + * + * @usec: CQ timer suggestion (by DIM) + * @pkts: CQ packet counter suggestion (by DIM) + * @cq_period_mode: CQ priod count mode (from CQE/EQE) + */ +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; +}; + +/** + * Structure for DIM sample data. + * Used for communications between DIM and its consumer. + * + * @time: Sample timestamp + * @pkt_ctr: Number of packets + * @byte_ctr: Number of bytes + * @event_ctr: Number of events + */ +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +/** + * Structure for DIM stats. + * Used for holding current measured rates. + * + * @ppms: Packets per msec + * @bpms: Bytes per msec + * @epms: Events per msec + */ +struct dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ + int cpms; /* completions per msec */ + int cpe_ratio; /* ratio of completions to events */ +}; + +/** + * Main structure for dynamic interrupt moderation (DIM). + * Used for holding all information about a specific DIM instance. 
+ * + * @state: Algorithm state (see below) + * @prev_stats: Measured rates from previous iteration (for comparison) + * @start_sample: Sampled data at start of current iteration + * @work: Work to perform on action required + * @priv: A pointer to the struct that points to dim + * @profile_ix: Current moderation profile + * @mode: CQ period count mode + * @tune_state: Algorithm tuning state (see below) + * @steps_right: Number of steps taken towards higher moderation + * @steps_left: Number of steps taken towards lower moderation + * @tired: Parking depth counter + */ +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +/** + * enum dim_cq_period_mode + * + * These are the modes for CQ period count. + * + * @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE + * @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset) + * @DIM_CQ_PERIOD_NUM_MODES: Number of modes + */ +enum { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +/** + * enum dim_state + * + * These are the DIM algorithm states. + * These will determine if the algorithm is in a valid state to start an iteration. + * + * @DIM_START_MEASURE: This is the first iteration (also after applying a new profile) + * @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if + * need to perform an action + * @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure + */ +enum { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +/** + * enum dim_tune_state + * + * These are the DIM algorithm tune states. + * These will determine which action the algorithm should perform. + * + * @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference + * @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0 + * @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels + * @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels + */ +enum { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +/** + * enum dim_stats_state + * + * These are the DIM algorithm statistics states. + * These will determine the verdict of current iteration. + * + * @DIM_STATS_WORSE: Current iteration shows worse performance than before + * @DIM_STATS_WORSE: Current iteration shows same performance than before + * @DIM_STATS_WORSE: Current iteration shows better performance than before + */ +enum { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +/** + * enum dim_step_result + * + * These are the DIM algorithm step results. + * These describe the result of a step. + * + * @DIM_STEPPED: Performed a regular step + * @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to + * tired parking + * @DIM_ON_EDGE: Stepped to the most left/right profile + */ +enum { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +/** + * dim_on_top - check if current state is a good place to stop (top location) + * @dim: DIM context + * + * Check if current profile is a good place to park at. + * This will result in reducing the DIM checks frequency as we assume we + * shouldn't probably change profiles, unless traffic pattern wasn't changed. 
+ */ +bool dim_on_top(struct dim *dim); + +/** + * dim_turn - change profile alterning direction + * @dim: DIM context + * + * Go left if we were going right and vice-versa. + * Do nothing if currently parking. + */ +void dim_turn(struct dim *dim); + +/** + * dim_park_on_top - enter a parking state on a top location + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history. + */ +void dim_park_on_top(struct dim *dim); + +/** + * dim_park_tired - enter a tired parking state + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history and cause DIM checks frequency to reduce. + */ +void dim_park_tired(struct dim *dim); + +/** + * dim_calc_stats - calculate the difference between two samples + * @start: start sample + * @end: end sample + * @curr_stats: delta between samples + * + * Calculate the delta between two samples (in data rates). + * Takes into consideration counter wrap-around. + */ +void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, + struct dim_stats *curr_stats); + +/** + * dim_update_sample - set a sample's fields with give values + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @s: DIM sample + */ +static inline void +dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +/** + * dim_update_sample_with_comps - set a sample's fields with given + * values including the completion parameter + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @comps: number of completions to set + * @s: DIM sample + */ +static inline void +dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps, + struct dim_sample *s) +{ + dim_update_sample(event_ctr, packets, bytes, s); + s->comp_ctr = comps; +} + +/* Net DIM */ + +/** + * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_rx_moderation - provide the default RX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode); + +/** + * net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_tx_moderation - provide the default TX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode); + +/** + * net_dim - main DIM algorithm entry point + * @dim: DIM instance information + * @end_sample: Current data measurement + * + * Called by the consumer. + * This is the main logic of the algorithm, where data is processed in order to decide on next + * required action. + */ +void net_dim(struct dim *dim, struct dim_sample end_sample); + +/* RDMA DIM */ + +/* + * RDMA DIM profile: + * profile size must be of RDMA_DIM_PARAMS_NUM_PROFILES. + */ +#define RDMA_DIM_PARAMS_NUM_PROFILES 9 +#define RDMA_DIM_START_PROFILE 0 + +/** + * rdma_dim - Runs the adaptive moderation. + * @dim: The moderation struct. + * @completions: The number of completions collected in this round. 
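Typical consumer usage of the net DIM entry points, roughly as a network driver would do at the end of its NAPI poll: fill a dim_sample from the ring counters and hand it to net_dim(), which schedules dim->work when the moderation profile should change. The foo_ring structure is illustrative and the work handler that applies the new profile is omitted:

#include <linux/dim.h>

struct foo_ring {
	struct dim dim;		/* dim.work initialised with INIT_WORK() elsewhere */
	u16 events;
	u64 packets;
	u64 bytes;
};

static void foo_ring_update_dim(struct foo_ring *ring)
{
	struct dim_sample sample;

	dim_update_sample(ring->events, ring->packets, ring->bytes, &sample);
	net_dim(&ring->dim, sample);
}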
+ * + * Each call to rdma_dim takes the latest amount of completions that + * have been collected and counts them as a new event. + * Once enough events have been collected the algorithm decides a new + * moderation level. + */ +void rdma_dim(struct dim *dim, u64 completions); + +#endif /* DIM_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 9b84114f74ce..bae060fae862 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -28,19 +28,21 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @map_atomic: [optional] maps a page from the buffer into kernel address - * space, users may not block until the subsequent unmap call. - * This callback must not sleep. - * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer. - * This Callback must not sleep. - * @map: [optional] maps a page from the buffer into kernel address space. - * @unmap: [optional] unmaps a page from the buffer. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { /** + * @cache_sgt_mapping: + * + * If true the framework will cache the first mapping made for each + * attachment. This avoids creating mappings for attachments multiple + * times. + */ + bool cache_sgt_mapping; + + /** * @attach: * * This is called from dma_buf_attach() to make sure that a given @@ -194,8 +196,6 @@ struct dma_buf_ops { * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); - void *(*map)(struct dma_buf *, unsigned long); - void (*unmap)(struct dma_buf *, unsigned long, void *); /** * @mmap: @@ -234,6 +234,31 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); + /** + * @map: + * + * Maps a page from the buffer into kernel address space. The page is + * specified by offset into the buffer in PAGE_SIZE units. + * + * This callback is optional. + * + * Returns: + * + * Virtual address pointer where requested page can be accessed. NULL + * on error or when this function is unimplemented by the exporter. + */ + void *(*map)(struct dma_buf *, unsigned long); + + /** + * @unmap: + * + * Unmaps a page from the buffer. Page offset and address pointer should + * be the same as the one passed to and returned by matching call to map. + * + * This callback is optional. + */ + void (*unmap)(struct dma_buf *, unsigned long, void *); + void *(*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *vaddr); }; @@ -244,10 +269,12 @@ struct dma_buf_ops { * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. - * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @lock: used internally to serialize list manipulation, attach/detach and + * vmap/unmap, and accesses to name * @vmapping_counter: used internally to refcnt the vmaps * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. + * @name: userspace-provided name; useful for accounting and debugging. * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. 
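An exporter opting in to the new cached-mapping behaviour simply sets cache_sgt_mapping in its dma_buf_ops; the core then reuses the first map_dma_buf() result per attachment via the new sgt/dir fields. Sketch only, with the hypothetical callbacks declared but not shown:

#include <linux/dma-buf.h>

static struct sg_table *foo_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir);
static void foo_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir);
static void foo_release(struct dma_buf *dmabuf);
static int foo_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma);

static const struct dma_buf_ops foo_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	= foo_map_dma_buf,
	.unmap_dma_buf	= foo_unmap_dma_buf,
	.release	= foo_release,
	.mmap		= foo_mmap,
};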
@@ -275,6 +302,7 @@ struct dma_buf { unsigned vmapping_counter; void *vmap_ptr; const char *exp_name; + const char *name; struct module *owner; struct list_head list_node; void *priv; @@ -296,6 +324,8 @@ struct dma_buf { * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. * @node: list of dma_buf_attachment. + * @sgt: cached mapping. + * @dir: direction of cached mapping. * @priv: exporter specific attachment data. * * This structure holds the attachment information between the dma_buf buffer @@ -311,6 +341,8 @@ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; void *priv; }; diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 6665fa03c0d1..c05d4e661489 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -50,6 +50,7 @@ #ifdef __KERNEL__ #include <linux/device.h> +#include <linux/mm.h> struct cma; struct page; @@ -111,6 +112,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order, bool no_warn); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); +struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp); +void dma_free_contiguous(struct device *dev, struct page *page, size_t size); #else @@ -153,6 +156,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, return false; } +/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */ +static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, + gfp_t gfp) +{ + int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; + size_t align = get_order(PAGE_ALIGN(size)); + + return alloc_pages_node(node, gfp, align); +} + +static inline void dma_free_contiguous(struct device *dev, struct page *page, + size_t size) +{ + __free_pages(page, get_order(size)); +} + #endif #endif diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index b7338702592a..adf993a3bd58 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ +#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED +bool force_dma_unencrypted(struct device *dev); +#else +static inline bool force_dma_unencrypted(struct device *dev) +{ + return false; +} +#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */ + /* * If memory encryption is supported, phys_to_dma will set the memory encryption * bit in the DMA address, and dma_to_phys will clear it. 
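dma_alloc_contiguous()/dma_free_contiguous() give DMA allocators a single call that prefers the device's (or the global) CMA area and otherwise falls back to the normal page allocator, as the CONFIG_DMA_CMA=n stubs above show. A minimal sketch of a caller; the foo_* wrappers are illustrative:

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>

static struct page *foo_alloc_buf(struct device *dev, size_t size, gfp_t gfp)
{
	return dma_alloc_contiguous(dev, size, gfp);
}

static void foo_free_buf(struct device *dev, struct page *page, size_t size)
{
	/* Releases to CMA if the page came from there, else to the buddy. */
	dma_free_contiguous(dev, page, size);
}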
The raw __phys_to_dma diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 37258c8b2063..2112f21f73d8 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -5,59 +5,21 @@ #ifndef __DMA_IOMMU_H #define __DMA_IOMMU_H -#ifdef __KERNEL__ +#include <linux/errno.h> #include <linux/types.h> -#include <asm/errno.h> #ifdef CONFIG_IOMMU_DMA #include <linux/dma-mapping.h> #include <linux/iommu.h> #include <linux/msi.h> -int iommu_dma_init(void); - /* Domain management interface for IOMMU drivers */ int iommu_get_dma_cookie(struct iommu_domain *domain); int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ -int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, - u64 size, struct device *dev); - -/* General helpers for DMA-API <-> IOMMU-API interaction */ -int dma_info_to_prot(enum dma_data_direction dir, bool coherent, - unsigned long attrs); - -/* - * These implement the bulk of the relevant DMA mapping callbacks, but require - * the arch code to take care of attributes and cache maintenance - */ -struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, - unsigned long attrs, int prot, dma_addr_t *handle, - void (*flush_page)(struct device *, const void *, phys_addr_t)); -void iommu_dma_free(struct device *dev, struct page **pages, size_t size, - dma_addr_t *handle); - -int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); - -dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, int prot); -int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, int prot); - -/* - * Arch code with no special attribute handling may use these - * directly as DMA mapping callbacks for simplicity - */ -void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir, unsigned long attrs); -void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs); -dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, - size_t size, enum dma_data_direction dir, unsigned long attrs); -void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs); +void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); /* The DMA API isn't _quite_ the whole story, though... 
*/ /* @@ -75,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); -#else +#else /* CONFIG_IOMMU_DMA */ struct iommu_domain; struct msi_desc; struct msi_msg; struct device; -static inline int iommu_dma_init(void) +static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size) { - return 0; } static inline int iommu_get_dma_cookie(struct iommu_domain *domain) @@ -117,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he } #endif /* CONFIG_IOMMU_DMA */ -#endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 6309a721394b..f7d1eea32c78 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } +/** + * dma_addressing_limited - return if the device is addressing limited + * @dev: device to check + * + * Return %true if the devices DMA mask is too small to address all memory in + * the system, else %false. Lack of addressing bits is the prime reason for + * bounce buffering, but might not be the only one. + */ +static inline bool dma_addressing_limited(struct device *dev) +{ + return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) < + dma_get_required_mask(dev); +} + #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent); @@ -729,13 +743,6 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) return -EIO; } -#ifndef dma_max_pfn -static inline unsigned long dma_max_pfn(struct device *dev) -{ - return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; -} -#endif - static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 9741767e400f..3813211a9aad 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -20,6 +20,22 @@ static inline bool dev_is_dma_coherent(struct device *dev) } #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ +/* + * Check if an allocation needs to be marked uncached to be coherent. + */ +static __always_inline bool dma_alloc_need_uncached(struct device *dev, + unsigned long attrs) +{ + if (dev_is_dma_coherent(dev)) + return false; + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) + return false; + if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && + (attrs & DMA_ATTR_NON_CONSISTENT)) + return false; + return true; +} + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, @@ -80,4 +96,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ +void *uncached_kernel_address(void *addr); +void *cached_kernel_address(void *addr); + #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h new file mode 100644 index 000000000000..cab6e18773da --- /dev/null +++ b/include/linux/dma/edma.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
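dma_addressing_limited() gives drivers a cheap way to decide whether bounce buffering or smaller descriptors are needed because the device cannot reach all of system memory. A sketch of the intended probe-time check; foo_needs_bounce() and the 32-bit mask are illustrative:

#include <linux/dma-mapping.h>

static bool foo_needs_bounce(struct device *dev)
{
	/* Hypothetical device that can only generate 32-bit addresses. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return true;

	return dma_addressing_limited(dev);
}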
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_H +#define _DW_EDMA_H + +#include <linux/device.h> +#include <linux/dmaengine.h> + +struct dw_edma; + +/** + * struct dw_edma_chip - representation of DesignWare eDMA controller hardware + * @dev: struct device of the eDMA controller + * @id: instance ID + * @irq: irq line + * @dw: struct dw_edma that is filed by dw_edma_probe() + */ +struct dw_edma_chip { + struct device *dev; + int id; + int irq; + struct dw_edma *dw; +}; + +/* Export to the platform drivers */ +#if IS_ENABLED(CONFIG_DW_EDMA) +int dw_edma_probe(struct dw_edma_chip *chip); +int dw_edma_remove(struct dw_edma_chip *chip); +#else +static inline int dw_edma_probe(struct dw_edma_chip *chip) +{ + return -ENODEV; +} + +static inline int dw_edma_remove(struct dw_edma_chip *chip) +{ + return 0; +} +#endif /* CONFIG_DW_EDMA */ + +#endif /* _DW_EDMA_H */ diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h new file mode 100644 index 000000000000..069d9f5a609e --- /dev/null +++ b/include/linux/dma/mxs-dma.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MXS_DMA_H_ +#define _MXS_DMA_H_ + +#include <linux/dmaengine.h> + +#define MXS_DMA_CTRL_WAIT4END BIT(31) +#define MXS_DMA_CTRL_WAIT4RDY BIT(30) + +/* + * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words + * in the second argument to dmaengine_prep_slave_sg when the direction is + * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing + * the error prone casting we have this wrapper function + */ +static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio( + struct dma_chan *chan, u32 *pio, unsigned int npio, + enum dma_transfer_direction dir, unsigned long flags) +{ + return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio, + dir, flags); +} + +#endif /* _MXS_DMA_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c952f987ee57..8fcdee1c0cf9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param); + dma_filter_fn fn, void *fn_param, + struct device_node *np); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); struct dma_chan *dma_request_chan(struct device *dev, const char *name); @@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void) { } static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) + dma_filter_fn fn, + void *fn_param, + struct device_node *np) { return NULL; } @@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); -#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +#define dma_request_channel(mask, x, y) \ + __dma_request_channel(&(mask), x, y, NULL) #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ __dma_request_slave_channel_compat(&(mask), x, y, dev, name) @@ -1417,6 +1421,6 @@ static inline struct dma_chan if (!fn || 
!fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param); + return __dma_request_channel(mask, fn, fn_param, NULL); } #endif /* DMAENGINE_H */ diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 28813c6f44b6..a7cf3599d9a1 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -92,12 +92,14 @@ static inline bool dmar_rcu_check(void) #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) -#define for_each_dev_scope(a, c, p, d) \ - for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ - NULL, (p) < (c)); (p)++) - -#define for_each_active_dev_scope(a, c, p, d) \ - for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else +#define for_each_dev_scope(devs, cnt, i, tmp) \ + for ((i) = 0; ((tmp) = (i) < (cnt) ? \ + dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \ + (i)++) + +#define for_each_active_dev_scope(devs, cnt, i, tmp) \ + for_each_dev_scope((devs), (cnt), (i), (tmp)) \ + if (!(tmp)) { continue; } else extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index f2b3ae22e6b7..976cbbdb2832 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -26,7 +26,8 @@ #include <uapi/linux/dns_resolver.h> -extern int dns_query(const char *type, const char *name, size_t namelen, +struct net; +extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry, bool invalidate); diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 3911e0586478..0aa803c451a3 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -20,9 +20,6 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); -struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev, - struct packet_type *pt, u16 *tpid, u16 *tci); - u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); @@ -31,6 +28,8 @@ int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); +struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb); + #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -45,12 +44,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, return NULL; } -struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev, - struct packet_type *pt, u16 *tpid, u16 *tci) -{ - return NULL; -} - u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port) { return 0; @@ -71,6 +64,11 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } +struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) +{ + return NULL; +} + #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index e46e18c47d41..79435cfc20eb 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -12,6 +12,7 @@ #include <net/dsa.h> #define ETH_P_SJA1105 ETH_P_DSA_8021Q +#define ETH_P_SJA1105_META 0x0008 /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull @@ -20,8 +21,41 @@ #define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull #define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull +/* Source and Destination MAC of follow-up 
meta frames. + * Whereas the choice of SMAC only affects the unique identification of the + * switch as sender of meta frames, the DMAC must be an address that is present + * in the DSA master port's multicast MAC filter. + * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588 + * over L2 use this address for some purpose already. + */ +#define SJA1105_META_SMAC 0x222222222222ull +#define SJA1105_META_DMAC 0x0180C200000Eull + +/* Global tagger data: each struct sja1105_port has a reference to + * the structure defined in struct sja1105_private. + */ +struct sja1105_tagger_data { + struct sk_buff_head skb_rxtstamp_queue; + struct work_struct rxtstamp_work; + struct sk_buff *stampable_skb; + /* Protects concurrent access to the meta state machine + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; + bool hwts_rx_en; +}; + +struct sja1105_skb_cb { + u32 meta_tstamp; +}; + +#define SJA1105_SKB_CB(skb) \ + ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb)) + struct sja1105_port { + struct sja1105_tagger_data *data; struct dsa_port *dp; + bool hwts_tx_en; int mgmt_slot; }; diff --git a/include/linux/efi.h b/include/linux/efi.h index 6ebc2098cfe1..f87fabea4a85 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -689,6 +689,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) +#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) typedef struct { @@ -996,6 +997,7 @@ extern struct efi { unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long tpm_final_log; /* TPM2 Final Events Log table */ unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; @@ -1706,12 +1708,20 @@ struct linux_efi_random_seed { struct linux_efi_tpm_eventlog { u32 size; + u32 final_events_preboot_size; u8 version; u8 log[]; }; extern int efi_tpm_eventlog_init(void); +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[]; +}; +extern int efi_tpm_final_log_size; + /* * efi_runtime_service() function identifiers. 
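/*
 * Illustrative sketch with assumed names, not taken from the patch: how a
 * tagger might hand the partial RX timestamp carried in SJA1105_SKB_CB() to
 * the stack.  Reconstructing the full 64-bit time from the switch PTP clock
 * (including wraparound handling) is driver specific and omitted here.
 */
#include <linux/dsa/sja1105.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_rx_meta_tstamp(struct sk_buff *skb, u64 ptp_upper_ns)
{
	struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
	/* meta_tstamp only carries the low bits captured by the meta frame */
	u64 ns = ptp_upper_ns | SJA1105_SKB_CB(skb)->meta_tstamp;

	memset(shwt, 0, sizeof(*shwt));
	shwt->hwtstamp = ns_to_ktime(ns);
}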
* "NONE" is used by efi_recover_from_page_fault() to check if the page diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 6e8bc53740f0..1dd014c9c87b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -34,7 +34,7 @@ struct elevator_mq_ops { void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *q, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); @@ -45,7 +45,6 @@ struct elevator_mq_ops { struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); - void (*started_request)(struct request *); void (*requeue_request)(struct request *); struct request *(*former_request)(struct request_queue *, struct request *); struct request *(*next_request)(struct request_queue *, struct request *); @@ -75,7 +74,7 @@ struct elevator_type size_t icq_size; /* see iocontext.h */ size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; - char elevator_name[ELV_NAME_MAX]; + const char *elevator_name; const char *elevator_alias; struct module *elevator_owner; #ifdef CONFIG_BLK_DEBUG_FS @@ -160,15 +159,6 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_FLUSH 5 #define ELEVATOR_INSERT_SORT_MERGE 6 -/* - * return values from elevator_may_queue_fn - */ -enum { - ELV_MQUEUE_MAY, - ELV_MQUEUE_NO, - ELV_MQUEUE_MUST, -}; - #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index aa027f7bcb3e..73f8c3cb9588 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -89,7 +89,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, * like schedutil. 
*/ cpu = cpumask_first(to_cpumask(pd->cpus)); - scale_cpu = arch_scale_cpu_capacity(NULL, cpu); + scale_cpu = arch_scale_cpu_capacity(cpu); cs = &pd->table[pd->nr_cap_states - 1]; freq = map_util_freq(max_util, cs->frequency, scale_cpu); diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 7e6c77740413..e525f6957c49 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -11,7 +11,7 @@ /* * For explanation of the elements of this struct, see - * Documentation/fault-injection/fault-injection.txt + * Documentation/fault-injection/fault-injection.rst */ struct fault_attr { unsigned long probability; diff --git a/include/linux/fb.h b/include/linux/fb.h index f52ef0ad6781..303771264644 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -126,39 +126,15 @@ struct fb_cursor_user { /* The resolution of the passed in fb_info about to change */ #define FB_EVENT_MODE_CHANGE 0x01 -/* The display on this fb_info is being suspended, no access to the - * framebuffer is allowed any more after that call returns - */ -#define FB_EVENT_SUSPEND 0x02 -/* The display on this fb_info was resumed, you can restore the display - * if you own it - */ -#define FB_EVENT_RESUME 0x03 -/* An entry from the modelist was removed */ -#define FB_EVENT_MODE_DELETE 0x04 -/* A driver registered itself */ + +#ifdef CONFIG_GUMSTIX_AM200EPD +/* only used by mach-pxa/am200epd.c */ #define FB_EVENT_FB_REGISTERED 0x05 -/* A driver unregistered itself */ #define FB_EVENT_FB_UNREGISTERED 0x06 -/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ -#define FB_EVENT_GET_CONSOLE_MAP 0x07 -/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ -#define FB_EVENT_SET_CONSOLE_MAP 0x08 -/* A hardware display blank change occurred */ +#endif + +/* A display blank is requested */ #define FB_EVENT_BLANK 0x09 -/* Private modelist is to be replaced */ -#define FB_EVENT_NEW_MODELIST 0x0A -/* The resolution of the passed in fb_info about to change and - all vc's should be changed */ -#define FB_EVENT_MODE_CHANGE_ALL 0x0B -/* A software display blank change occurred */ -#define FB_EVENT_CONBLANK 0x0C -/* Get drawing requirements */ -#define FB_EVENT_GET_REQ 0x0D -/* Unbind from the console if possible */ -#define FB_EVENT_FB_UNBIND 0x0E -/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */ -#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F /* A hardware display blank early change occurred */ #define FB_EARLY_EVENT_BLANK 0x10 /* A hardware display blank revert early change occurred */ @@ -633,8 +609,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* drivers/video/fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); -extern int unregister_framebuffer(struct fb_info *fb_info); -extern int unlink_framebuffer(struct fb_info *fb_info); +extern void unregister_framebuffer(struct fb_info *fb_info); +extern void unlink_framebuffer(struct fb_info *fb_info); extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name); extern int remove_conflicting_framebuffers(struct apertures_struct *a, @@ -660,7 +636,10 @@ extern struct class *fb_class; for (i = 0; i < FB_MAX; i++) \ if (!registered_fb[i]) {} else -extern int lock_fb_info(struct fb_info *info); +static inline void lock_fb_info(struct fb_info *info) +{ + mutex_lock(&info->lock); +} static inline void unlock_fb_info(struct fb_info *info) { diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index f68a7db14165..ff5596dd30f8 100644 --- 
a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -4,9 +4,39 @@ #ifdef CONFIG_FRAMEBUFFER_CONSOLE void __init fb_console_init(void); void __exit fb_console_exit(void); +int fbcon_fb_registered(struct fb_info *info); +void fbcon_fb_unregistered(struct fb_info *info); +void fbcon_fb_unbind(struct fb_info *info); +void fbcon_suspended(struct fb_info *info); +void fbcon_resumed(struct fb_info *info); +int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode); +void fbcon_new_modelist(struct fb_info *info); +void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps); +void fbcon_fb_blanked(struct fb_info *info, int blank); +void fbcon_update_vcs(struct fb_info *info, bool all); +void fbcon_remap_all(struct fb_info *info); +int fbcon_set_con2fb_map_ioctl(void __user *argp); +int fbcon_get_con2fb_map_ioctl(void __user *argp); #else static inline void fb_console_init(void) {} static inline void fb_console_exit(void) {} +static inline int fbcon_fb_registered(struct fb_info *info) { return 0; } +static inline void fbcon_fb_unregistered(struct fb_info *info) {} +static inline void fbcon_fb_unbind(struct fb_info *info) {} +static inline void fbcon_suspended(struct fb_info *info) {} +static inline void fbcon_resumed(struct fb_info *info) {} +static inline int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode) { return 0; } +static inline void fbcon_new_modelist(struct fb_info *info) {} +static inline void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps) {} +static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} +static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} +static inline void fbcon_remap_all(struct fb_info *info) {} +static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; } +static inline int fbcon_get_con2fb_map_ioctl(void __user *argp) { return 0; } #endif #endif /* _LINUX_FBCON_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 7148bab96943..92c6e31fb008 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -24,6 +24,7 @@ #include <net/sch_generic.h> +#include <asm/byteorder.h> #include <uapi/linux/filter.h> #include <uapi/linux/bpf.h> @@ -160,6 +161,20 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) +/* Special form of mov32, used for doing explicit zero extension on dst. */ +#define BPF_ZEXT_REG(DST) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = DST, \ + .off = 0, \ + .imm = 1 }) + +static inline bool insn_is_zext(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; +} + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) @@ -512,7 +527,8 @@ struct bpf_prog { blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ - has_callchain_buf:1; /* callchain buffer allocated? */ + has_callchain_buf:1, /* callchain buffer allocated? 
*/ + enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -563,8 +579,9 @@ struct bpf_skb_data_end { }; struct bpf_redirect_info { - u32 ifindex; u32 flags; + u32 tgt_index; + void *tgt_value; struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; @@ -731,6 +748,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) return size <= size_default && (size & (size - 1)) == 0; } +static inline u8 +bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default) +{ + u8 load_off = off & (size_default - 1); + +#ifdef __LITTLE_ENDIAN + return load_off * 8; +#else + return (size_default - (load_off + size)) * 8; +#endif +} + +#define bpf_ctx_wide_access_ok(off, size, type, field) \ + (size == sizeof(__u64) && \ + off >= offsetof(type, field) && \ + off + sizeof(__u64) <= offsetofend(type, field) && \ + off % sizeof(__u64) == 0) + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) @@ -811,6 +846,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); +bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(void) @@ -1183,4 +1219,14 @@ struct bpf_sysctl_kern { u64 tmp_reg; }; +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 1262ea6a1f4b..778abbbc7d94 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -46,7 +46,6 @@ #define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U #define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U -#define ZYNQMP_PM_CAPABILITY_POWER 0x8U /* * Firmware FPGA Manager flags diff --git a/include/linux/flat.h b/include/linux/flat.h index 569b67d64d5c..83977c0ce3de 100644 --- a/include/linux/flat.h +++ b/include/linux/flat.h @@ -10,8 +10,41 @@ #ifndef _LINUX_FLAT_H #define _LINUX_FLAT_H -#include <uapi/linux/flat.h> -#include <asm/flat.h> +#define FLAT_VERSION 0x00000004L + +/* + * To make everything easier to port and manage cross platform + * development, all fields are in network byte order. + */ + +struct flat_hdr { + char magic[4]; + __be32 rev; /* version (as above) */ + __be32 entry; /* Offset of first executable instruction + with text segment from beginning of file */ + __be32 data_start; /* Offset of data segment from beginning of + file */ + __be32 data_end; /* Offset of end of data segment from beginning + of file */ + __be32 bss_end; /* Offset of end of bss segment from beginning + of file */ + + /* (It is assumed that data_end through bss_end forms the bss segment.) 
*/ + + __be32 stack_size; /* Size of stack, in bytes */ + __be32 reloc_start; /* Offset of relocation records from beginning of + file */ + __be32 reloc_count; /* Number of relocation records */ + __be32 flags; + __be32 build_date; /* When the program/library was built */ + __u32 filler[5]; /* Reservered, set to zero */ +}; + +#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */ +#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */ +#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */ +#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */ +#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */ /* * While it would be nice to keep this header clean, users of older @@ -22,28 +55,21 @@ * with the format above, except to fix bugs with old format support. */ -#include <asm/byteorder.h> - #define OLD_FLAT_VERSION 0x00000002L #define OLD_FLAT_RELOC_TYPE_TEXT 0 #define OLD_FLAT_RELOC_TYPE_DATA 1 #define OLD_FLAT_RELOC_TYPE_BSS 2 typedef union { - unsigned long value; + u32 value; struct { -# if defined(mc68000) && !defined(CONFIG_COLDFIRE) - signed long offset : 30; - unsigned long type : 2; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +#if defined(__LITTLE_ENDIAN_BITFIELD) || \ + (defined(mc68000) && !defined(CONFIG_COLDFIRE)) + s32 offset : 30; + u32 type : 2; # elif defined(__BIG_ENDIAN_BITFIELD) - unsigned long type : 2; - signed long offset : 30; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ -# elif defined(__LITTLE_ENDIAN_BITFIELD) - signed long offset : 30; - unsigned long type : 2; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ + u32 type : 2; + s32 offset : 30; # else # error "Unknown bitfield order for flat files." # endif diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h deleted file mode 100644 index bec899f0867c..000000000000 --- a/include/linux/fmc-sdb.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This file is separate from sdb.h, because I want that one to remain - * unchanged (as far as possible) from the official sdb distribution - * - * This file and associated functionality are a playground for me to - * understand stuff which will later be implemented in more generic places. 
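/*
 * Illustrative sketch, assumed function name: validating a flat-binary header
 * under the network-byte-order rule stated above.  "bFLT" is the flat-binary
 * magic; all multi-byte fields are __be32 and must be converted before use.
 */
#include <linux/errno.h>
#include <linux/flat.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static int example_check_flat_hdr(const struct flat_hdr *hdr)
{
	if (memcmp(hdr->magic, "bFLT", 4))
		return -ENOEXEC;

	if (be32_to_cpu(hdr->rev) != FLAT_VERSION &&
	    be32_to_cpu(hdr->rev) != OLD_FLAT_VERSION)
		return -ENOEXEC;

	return 0;
}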
- */ -#include <linux/sdb.h> - -/* This is the union of all currently defined types */ -union sdb_record { - struct sdb_interconnect ic; - struct sdb_device dev; - struct sdb_bridge bridge; - struct sdb_integration integr; - struct sdb_empty empty; - struct sdb_synthesis synthesis; - struct sdb_repo_url repo_url; -}; - -struct fmc_device; - -/* Every sdb table is turned into this structure */ -struct sdb_array { - int len; - int level; - unsigned long baseaddr; - struct fmc_device *fmc; /* the device that hosts it */ - struct sdb_array *parent; /* NULL at root */ - union sdb_record *record; /* copies of the struct */ - struct sdb_array **subtree; /* only valid for bridge items */ -}; - -extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address); -extern void fmc_show_sdb_tree(const struct fmc_device *fmc); -extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor, - uint32_t device, unsigned long *sz); -extern int fmc_free_sdb_tree(struct fmc_device *fmc); diff --git a/include/linux/fmc.h b/include/linux/fmc.h deleted file mode 100644 index b355f3806f3f..000000000000 --- a/include/linux/fmc.h +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2012 CERN (www.cern.ch) - * Author: Alessandro Rubini <rubini@gnudd.com> - * - * This work is part of the White Rabbit project, a research effort led - * by CERN, the European Institute for Nuclear Research. - */ -#ifndef __LINUX_FMC_H__ -#define __LINUX_FMC_H__ -#include <linux/types.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/io.h> - -struct fmc_device; -struct fmc_driver; - -/* - * This bus abstraction is developed separately from drivers, so we need - * to check the version of the data structures we receive. - */ - -#define FMC_MAJOR 3 -#define FMC_MINOR 0 -#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR) -#define __FMC_MAJOR(x) ((x) >> 16) -#define __FMC_MINOR(x) ((x) & 0xffff) - -/* - * The device identification, as defined by the IPMI FRU (Field Replaceable - * Unit) includes four different strings to describe the device. Here we - * only match the "Board Manufacturer" and the "Board Product Name", - * ignoring the "Board Serial Number" and "Board Part Number". All 4 are - * expected to be strings, so they are treated as zero-terminated C strings. - * Unspecified string (NULL) means "any", so if both are unspecified this - * is a catch-all driver. So null entries are allowed and we use array - * and length. This is unlike pci and usb that use null-terminated arrays - */ -struct fmc_fru_id { - char *manufacturer; - char *product_name; -}; - -/* - * If the FPGA is already programmed (think Etherbone or the second - * SVEC slot), we can match on SDB devices in the memory image. This - * match uses an array of devices that must all be present, and the - * match is based on vendor and device only. Further checks are expected - * to happen in the probe function. Zero means "any" and catch-all is allowed. 
- */ -struct fmc_sdb_one_id { - uint64_t vendor; - uint32_t device; -}; -struct fmc_sdb_id { - struct fmc_sdb_one_id *cores; - int cores_nr; -}; - -struct fmc_device_id { - struct fmc_fru_id *fru_id; - int fru_id_nr; - struct fmc_sdb_id *sdb_id; - int sdb_id_nr; -}; - -/* This sizes the module_param_array used by generic module parameters */ -#define FMC_MAX_CARDS 32 - -/* The driver is a pretty simple thing */ -struct fmc_driver { - unsigned long version; - struct device_driver driver; - int (*probe)(struct fmc_device *); - int (*remove)(struct fmc_device *); - const struct fmc_device_id id_table; - /* What follows is for generic module parameters */ - int busid_n; - int busid_val[FMC_MAX_CARDS]; - int gw_n; - char *gw_val[FMC_MAX_CARDS]; -}; -#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver) - -/* These are the generic parameters, that drivers may instantiate */ -#define FMC_PARAM_BUSID(_d) \ - module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444) -#define FMC_PARAM_GATEWARE(_d) \ - module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444) - -/* - * Drivers may need to configure gpio pins in the carrier. To read input - * (a very uncommon operation, and definitely not in the hot paths), just - * configure one gpio only and get 0 or 1 as retval of the config method - */ -struct fmc_gpio { - char *carrier_name; /* name or NULL for virtual pins */ - int gpio; - int _gpio; /* internal use by the carrier */ - int mode; /* GPIOF_DIR_OUT etc, from <linux/gpio.h> */ - int irqmode; /* IRQF_TRIGGER_LOW and so on */ -}; - -/* The numbering of gpio pins allows access to raw pins or virtual roles */ -#define FMC_GPIO_RAW(x) (x) /* 4096 of them */ -#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000) -#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */ -#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */ -#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */ -#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */ -#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */ -/* We may add SCL and SDA, or other roles if the need arises */ - -/* GPIOF_DIR_IN etc are missing before 3.0. copy from <linux/gpio.h> */ -#ifndef GPIOF_DIR_IN -# define GPIOF_DIR_OUT (0 << 0) -# define GPIOF_DIR_IN (1 << 0) -# define GPIOF_INIT_LOW (0 << 1) -# define GPIOF_INIT_HIGH (1 << 1) -#endif - -/* - * The operations are offered by each carrier and should make driver - * design completely independent of the carrier. Named GPIO pins may be - * the exception. 
- */ -struct fmc_operations { - uint32_t (*read32)(struct fmc_device *fmc, int offset); - void (*write32)(struct fmc_device *fmc, uint32_t value, int offset); - int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv); - int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d, - void *gw, unsigned long len); - int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw); - int (*irq_request)(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); - void (*irq_ack)(struct fmc_device *fmc); - int (*irq_free)(struct fmc_device *fmc); - int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio, - int ngpio); - int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l); - int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l); -}; - -/* Prefer this helper rather than calling of fmc->reprogram directly */ -int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d, - void *gw, unsigned long len, int sdb_entry); -extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw, - int sdb_entry); - -/* - * The device reports all information needed to access hw. - * - * If we have eeprom_len and not contents, the core reads it. - * Then, parsing of identifiers is done by the core which fills fmc_fru_id.. - * Similarly a device that must be matched based on SDB cores must - * fill the entry point and the core will scan the bus (FIXME: sdb match) - */ -struct fmc_device { - unsigned long version; - unsigned long flags; - struct module *owner; /* char device must pin it */ - struct fmc_fru_id id; /* for EEPROM-based match */ - struct fmc_operations *op; /* carrier-provided */ - int irq; /* according to host bus. 0 == none */ - int eeprom_len; /* Usually 8kB, may be less */ - int eeprom_addr; /* 0x50, 0x52 etc */ - uint8_t *eeprom; /* Full contents or leading part */ - char *carrier_name; /* "SPEC" or similar, for special use */ - void *carrier_data; /* "struct spec *" or equivalent */ - __iomem void *fpga_base; /* May be NULL (Etherbone) */ - __iomem void *slot_base; /* Set by the driver */ - struct fmc_device **devarray; /* Allocated by the bus */ - int slot_id; /* Index in the slot array */ - int nr_slots; /* Number of slots in this carrier */ - unsigned long memlen; /* Used for the char device */ - struct device dev; /* For Linux use */ - struct device *hwdev; /* The underlying hardware device */ - unsigned long sdbfs_entry; - struct sdb_array *sdb; - uint32_t device_id; /* Filled by the device */ - char *mezzanine_name; /* Defaults to ``fmc'' */ - void *mezzanine_data; - - struct dentry *dbg_dir; - struct dentry *dbg_sdb_dump; -}; -#define to_fmc_device(x) container_of((x), struct fmc_device, dev) - -#define FMC_DEVICE_HAS_GOLDEN 1 -#define FMC_DEVICE_HAS_CUSTOM 2 -#define FMC_DEVICE_NO_MEZZANINE 4 -#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */ - -/* - * If fpga_base can be used, the carrier offers no readl/writel methods, and - * this expands to a single, fast, I/O access. 
- */ -static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset) -{ - if (unlikely(fmc->op->read32)) - return fmc->op->read32(fmc, offset); - return readl(fmc->fpga_base + offset); -} -static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off) -{ - if (unlikely(fmc->op->write32)) - fmc->op->write32(fmc, val, off); - else - writel(val, fmc->fpga_base + off); -} - -/* pci-like naming */ -static inline void *fmc_get_drvdata(const struct fmc_device *fmc) -{ - return dev_get_drvdata(&fmc->dev); -} - -static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data) -{ - dev_set_drvdata(&fmc->dev, data); -} - -struct fmc_gateware { - void *bitstream; - unsigned long len; -}; - -/* The 5 access points */ -extern int fmc_driver_register(struct fmc_driver *drv); -extern void fmc_driver_unregister(struct fmc_driver *drv); -extern int fmc_device_register(struct fmc_device *tdev); -extern int fmc_device_register_gw(struct fmc_device *tdev, - struct fmc_gateware *gw); -extern void fmc_device_unregister(struct fmc_device *tdev); - -/* Three more for device sets, all driven by the same FPGA */ -extern int fmc_device_register_n(struct fmc_device **devs, int n); -extern int fmc_device_register_n_gw(struct fmc_device **devs, int n, - struct fmc_gateware *gw); -extern void fmc_device_unregister_n(struct fmc_device **devs, int n); - -/* Internal cross-calls between files; not exported to other modules */ -extern int fmc_match(struct device *dev, struct device_driver *drv); -extern int fmc_fill_id_info(struct fmc_device *fmc); -extern void fmc_free_id_info(struct fmc_device *fmc); -extern void fmc_dump_eeprom(const struct fmc_device *fmc); - -/* helpers for FMC operations */ -extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); -extern void fmc_irq_free(struct fmc_device *fmc); -extern void fmc_irq_ack(struct fmc_device *fmc); -extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); -extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, - int ngpio); -extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l); -extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l); - -/* helpers for FMC operations */ -extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); -extern void fmc_irq_free(struct fmc_device *fmc); -extern void fmc_irq_ack(struct fmc_device *fmc); -extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); - -#endif /* __LINUX_FMC_H__ */ diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h new file mode 100644 index 000000000000..7fc95d5c95bb --- /dev/null +++ b/include/linux/fpga/adi-axi-common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices AXI common registers & definitions + * + * Copyright 2019 Analog Devices Inc. 
+ * + * https://wiki.analog.com/resources/fpga/docs/axi_ip + * https://wiki.analog.com/resources/fpga/docs/hdl/regmap + */ + +#ifndef ADI_AXI_COMMON_H_ +#define ADI_AXI_COMMON_H_ + +#define ADI_AXI_REG_VERSION 0x0000 + +#define ADI_AXI_PCORE_VER(major, minor, patch) \ + (((major) << 16) | ((minor) << 8) | (patch)) + +#endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index f7fdfe93e25d..997a530ff4e9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -694,7 +694,7 @@ struct inode { atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; -#ifdef CONFIG_IMA +#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) atomic_t i_readcount; /* struct files open RO */ #endif union { @@ -1019,8 +1019,6 @@ struct file_lock_operations { }; struct lock_manager_operations { - int (*lm_compare_owner)(struct file_lock *, struct file_lock *); - unsigned long (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ @@ -1769,7 +1767,7 @@ struct block_device_operations; /* * These flags control the behavior of the remap_file_range function pointer. * If it is called with len == 0 that means "remap to end of source file". - * See Documentation/filesystems/vfs.txt for more details about this call. + * See Documentation/filesystems/vfs.rst for more details about this call. * * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request @@ -1889,6 +1887,9 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); +extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t len, unsigned int flags); extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, @@ -2174,6 +2175,8 @@ static inline void file_accessed(struct file *file) touch_atime(&file->f_path); } +extern int file_modified(struct file *file); + int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); @@ -2184,6 +2187,7 @@ struct file_system_type { #define FS_BINARY_MOUNTDATA 2 #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ +#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
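/*
 * Illustrative sketch referring back to the new adi-axi-common.h above; the
 * dev/base parameters and the minimum version are assumptions for the example.
 * The core's version register packs major/minor/patch the same way as
 * ADI_AXI_PCORE_VER(), so the two can be compared directly.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/fpga/adi-axi-common.h>

static int example_check_axi_version(struct device *dev, void __iomem *base)
{
	u32 version = readl(base + ADI_AXI_REG_VERSION);

	if (version < ADI_AXI_PCORE_VER(1, 0, 'a')) {
		dev_err(dev, "unsupported core version %u.%u.%c\n",
			version >> 16, (version >> 8) & 0xff, version & 0xff);
		return -ENODEV;
	}

	return 0;
}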
*/ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_description *parameters; @@ -2206,9 +2210,6 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -extern struct dentry *mount_ns(struct file_system_type *fs_type, - int flags, void *data, void *ns, struct user_namespace *user_ns, - int (*fill_super)(struct super_block *, void *, int)); #ifdef CONFIG_BLOCK extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, @@ -2248,28 +2249,10 @@ void free_anon_bdev(dev_t); struct super_block *sget_fc(struct fs_context *fc, int (*test)(struct super_block *, struct fs_context *), int (*set)(struct super_block *, struct fs_context *)); -struct super_block *sget_userns(struct file_system_type *type, - int (*test)(struct super_block *,void *), - int (*set)(struct super_block *,void *), - int flags, struct user_namespace *user_ns, - void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), int flags, void *data); -extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *, - const struct super_operations *ops, - const struct xattr_handler **xattr, - const struct dentry_operations *dops, - unsigned long); - -static inline struct dentry * -mount_pseudo(struct file_system_type *fs_type, char *name, - const struct super_operations *ops, - const struct dentry_operations *dops, unsigned long magic) -{ - return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic); -} /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ @@ -2615,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +extern struct block_device *bd_start_claiming(struct block_device *bdev, + void *holder); +extern void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); +extern void bd_abort_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); extern int __blkdev_reread_part(struct block_device *bdev); extern int blkdev_reread_part(struct block_device *bdev); @@ -2712,6 +2701,8 @@ extern int filemap_flush(struct address_space *); extern int filemap_fdatawait_keep_errors(struct address_space *mapping); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); +extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); static inline int filemap_fdatawait(struct address_space *mapping) { @@ -2890,7 +2881,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode) return atomic_read(&inode->i_writecount) > 0; } -#ifdef CONFIG_IMA +#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) static inline void i_readcount_dec(struct inode *inode) { BUG_ON(!atomic_read(&inode->i_readcount)); @@ -3046,6 +3037,10 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern int generic_remap_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, unsigned int remap_flags); +extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); +extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + 
size_t *count, unsigned int flags); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); @@ -3546,4 +3541,16 @@ static inline struct sock *io_uring_get_socket(struct file *file) } #endif +int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, + unsigned int flags); + +int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, + struct fsxattr *fa); + +static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags) +{ + memset(fa, 0, sizeof(*fa)); + fa->fsx_xflags = xflags; +} + #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index d476ff0c10df..7c6fe3d47fa6 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -81,7 +81,7 @@ struct fs_parameter { * Superblock creation fills in ->root whereas reconfiguration begins with this * already set. * - * See Documentation/filesystems/mounting.txt + * See Documentation/filesystems/mount_api.txt */ struct fs_context { const struct fs_context_operations *ops; @@ -99,6 +99,7 @@ struct fs_context { void *s_fs_info; /* Proposed s_fs_info */ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ unsigned int sb_flags_mask; /* Superblock flags that were changed */ + unsigned int s_iflags; /* OR'd with sb->s_iflags */ unsigned int lsm_flags; /* Information flags from the fs to the LSM */ enum fs_context_purpose purpose:8; enum fs_context_phase phase:8; /* The phase the context is in */ @@ -146,6 +147,12 @@ extern int vfs_get_super(struct fs_context *fc, enum vfs_get_super_keying keying, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); +extern int get_tree_nodev(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); +extern int get_tree_single(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); extern const struct file_operations fscontext_fops; diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index 7cab74d66f85..bdd09fd2520c 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) } void pin_remove(struct fs_pin *); -void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); void pin_insert(struct fs_pin *, struct vfsmount *); void pin_kill(struct fs_pin *); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index f7680ef1abd2..bd8f207a2fb6 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -63,16 +63,13 @@ struct fscrypt_operations { unsigned int max_namelen; }; +/* Decryption work */ struct fscrypt_ctx { union { struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { struct bio *bio; struct work_struct work; - } r; + }; struct list_head free_list; /* Free list */ }; u8 flags; /* Flags */ @@ -106,18 +103,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) extern void fscrypt_enqueue_decrypt_work(struct work_struct *); extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); -extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, - unsigned int, unsigned int, - u64, gfp_t); -extern int fscrypt_decrypt_page(const struct inode *, struct page *, 
unsigned int, - unsigned int, u64); -static inline struct page *fscrypt_control_page(struct page *page) +extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags); +extern int fscrypt_encrypt_block_inplace(const struct inode *inode, + struct page *page, unsigned int len, + unsigned int offs, u64 lblk_num, + gfp_t gfp_flags); + +extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs); +extern int fscrypt_decrypt_block_inplace(const struct inode *inode, + struct page *page, unsigned int len, + unsigned int offs, u64 lblk_num); + +static inline bool fscrypt_is_bounce_page(struct page *page) +{ + return page->mapping == NULL; +} + +static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; + return (struct page *)page_private(bounce_page); } -extern void fscrypt_restore_control_page(struct page *); +extern void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); @@ -223,7 +235,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname, extern void fscrypt_decrypt_bio(struct bio *); extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio); -extern void fscrypt_pullback_bio_page(struct page **, bool); extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, unsigned int); @@ -283,32 +294,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) return; } -static inline struct page *fscrypt_encrypt_page(const struct inode *inode, +static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, - unsigned int offs, - u64 lblk_num, gfp_t gfp_flags) + unsigned int offs, u64 lblk_num, + gfp_t gfp_flags) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; +} + +static inline int fscrypt_decrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs) +{ + return -EOPNOTSUPP; } -static inline int fscrypt_decrypt_page(const struct inode *inode, - struct page *page, - unsigned int len, unsigned int offs, - u64 lblk_num) +static inline int fscrypt_decrypt_block_inplace(const struct inode *inode, + struct page *page, + unsigned int len, + unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } -static inline struct page *fscrypt_control_page(struct page *page) +static inline bool fscrypt_is_bounce_page(struct page *page) +{ + return false; +} + +static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } -static inline void fscrypt_restore_control_page(struct page *page) +static inline void fscrypt_free_bounce_page(struct page *bounce_page) { - return; } /* policy.c */ @@ -410,11 +440,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, { } -static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) -{ - return; -} - static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { @@ -692,4 +717,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode, return 0; } +/* If *pagep is a bounce page, free it and set *pagep to the pagecache 
page */ +static inline void fscrypt_finalize_bounce_page(struct page **pagep) +{ + struct page *page = *pagep; + + if (fscrypt_is_bounce_page(page)) { + *pagep = fscrypt_pagecache_page(page); + fscrypt_free_bounce_page(page); + } +} + #endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index cb2b46f57af3..5d231ce8709b 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -98,6 +98,7 @@ struct fsl_usb2_platform_data { unsigned has_fsl_erratum_14:1; unsigned has_fsl_erratum_a005275:1; unsigned has_fsl_erratum_a005697:1; + unsigned has_fsl_erratum_a006918:1; unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 94972e8eb6d1..a2d5d175d3c1 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -189,6 +189,19 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct } /* + * fsnotify_unlink - 'name' was unlinked + * + * Caller must make sure that dentry->d_name is stable. + */ +static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) +{ + /* Expected to be called before d_delete() */ + WARN_ON_ONCE(d_is_negative(dentry)); + + fsnotify_dirent(dir, dentry, FS_DELETE); +} + +/* * fsnotify_mkdir - directory 'name' was created */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) @@ -199,6 +212,19 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) } /* + * fsnotify_rmdir - directory 'name' was removed + * + * Caller must make sure that dentry->d_name is stable. + */ +static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) +{ + /* Expected to be called before d_delete() */ + WARN_ON_ONCE(d_is_negative(dentry)); + + fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); +} + +/* * fsnotify_access - file was read */ static inline void fsnotify_access(struct file *file) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d4844cad2c2b..2de3b2ddd19a 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -357,7 +357,6 @@ extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); -extern void fsnotify_nameremove(struct dentry *dentry, int isdir); extern u32 fsnotify_get_cookie(void); static inline int fsnotify_inode_watches_children(struct inode *inode) @@ -527,9 +526,6 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) static inline void fsnotify_sb_delete(struct super_block *sb) {} -static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) -{} - static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 25e2995d4a4c..8a8cb3c401b2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -427,8 +427,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); iter = ftrace_rec_iter_next(iter)) -int ftrace_update_record(struct dyn_ftrace *rec, int enable); -int ftrace_test_record(struct dyn_ftrace *rec, int enable); +int ftrace_update_record(struct dyn_ftrace *rec, bool enable); +int ftrace_test_record(struct dyn_ftrace *rec, bool enable); void ftrace_run_stop_machine(int command); unsigned long 
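/*
 * Illustrative sketch, assumed caller, not from the patch: the calling pattern
 * the new fsnotify_unlink()/fsnotify_rmdir() helpers expect.  Both must run
 * while dentry->d_name is still stable and before d_delete(), as their
 * comments above require.
 */
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>

static void example_unlink_notify(struct inode *dir, struct dentry *dentry)
{
	/* dentry is still positive and its name still valid here */
	fsnotify_unlink(dir, dentry);
	d_delete(dentry);
}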
ftrace_location(unsigned long ip); unsigned long ftrace_location_range(unsigned long start, unsigned long end); diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 205f62b8d291..4bd583bd6934 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -155,6 +155,15 @@ static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); +extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, + dma_addr_t *dma, genpool_algo_t algo, void *data); +extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, + dma_addr_t *dma, int align); +extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); +extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, + dma_addr_t *dma, genpool_algo_t algo, void *data); +extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, + dma_addr_t *dma, int align); extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, void **owner); static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 39745b8bdd65..40915b461f18 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -106,6 +106,7 @@ void devm_gpio_free(struct device *dev, unsigned int gpio); struct device; struct gpio_chip; +struct pinctrl_dev; static inline bool gpio_is_valid(int number) { diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 9ddcf50a3c59..a7f08fb0f865 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_unhinge(struct device *dev, @@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void gpiod_put_array(struct gpio_descs *descs) @@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline struct gpio_desc *__must_check @@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_put_array(struct device *dev, @@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } @@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int 
value) static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value(unsigned int array_size, @@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value(unsigned int array_size, @@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, @@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, @@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, unsigned long 
*value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_to_irq(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index a1d273c96016..6a0e420915a3 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -18,6 +18,7 @@ struct seq_file; struct gpio_device; struct module; enum gpiod_flags; +enum gpio_lookup_flags; #ifdef CONFIG_GPIOLIB @@ -102,13 +103,6 @@ struct gpio_irq_chip { unsigned int num_parents; /** - * @parent_irq: - * - * For use by gpiochip_set_cascaded_irqchip() - */ - unsigned int parent_irq; - - /** * @parents: * * A list of interrupt parents of a GPIO chip. This is owned by the @@ -167,7 +161,7 @@ struct gpio_irq_chip { */ void (*irq_disable)(struct irq_data *data); }; -#endif +#endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * struct gpio_chip - abstract a GPIO controller @@ -200,6 +194,8 @@ struct gpio_irq_chip { * @dbg_show: optional routine to show contents in debugfs; default code * will be used when this is omitted, but custom code can show extra * state (such as pullup/pulldown configuration). + * @init_valid_mask: optional routine to initialize @valid_mask, to be used if + * not all GPIOs are valid. * @base: identifies the first GPIO number handled by this chip; * or, if negative during registration, requests dynamic ID allocation. * DEPRECATION: providing anything non-negative and nailing the base @@ -307,7 +303,7 @@ struct gpio_chip { spinlock_t bgpio_lock; unsigned long bgpio_data; unsigned long bgpio_dir; -#endif +#endif /* CONFIG_GPIO_GENERIC */ #ifdef CONFIG_GPIOLIB_IRQCHIP /* @@ -322,7 +318,7 @@ struct gpio_chip { * used to handle IRQs for most practical cases. 
*/ struct gpio_irq_chip irq; -#endif +#endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * @need_valid_mask: @@ -369,7 +365,7 @@ struct gpio_chip { */ int (*of_xlate)(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags); -#endif +#endif /* CONFIG_OF_GPIO */ }; extern const char *gpiochip_is_requested(struct gpio_chip *chip, @@ -412,7 +408,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, }) #else #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) -#endif +#endif /* CONFIG_LOCKDEP */ static inline int gpiochip_add(struct gpio_chip *chip) { @@ -467,7 +463,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ -#endif +#endif /* CONFIG_GPIO_GENERIC */ #ifdef CONFIG_GPIOLIB_IRQCHIP @@ -537,7 +533,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, handler, type, true, &lock_key, &request_key); } -#else +#else /* ! CONFIG_LOCKDEP */ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, @@ -588,7 +584,9 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip, unsigned int gpio_offset, const char *pin_group); void gpiochip_remove_pin_ranges(struct gpio_chip *chip); -#else +#else /* ! CONFIG_PINCTRL */ + +struct pinctrl_dev; static inline int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, @@ -614,7 +612,8 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip) struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, const char *label, - enum gpiod_flags flags); + enum gpio_lookup_flags lflags, + enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); void devprop_gpiochip_set_names(struct gpio_chip *chip, diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h index 5c6efd394cb0..39b888c40b39 100644 --- a/include/linux/gpio/gpio-reg.h +++ b/include/linux/gpio/gpio-reg.h @@ -11,4 +11,4 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg, int gpio_reg_resume(struct gpio_chip *gc); -#endif +#endif /* GPIO_REG_H */ diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 35f299d1f6a7..1ebe5be05d5f 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -97,7 +97,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_hogs(struct gpiod_hog *hogs); -#else +#else /* ! 
CONFIG_GPIOLIB */ static inline void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} static inline @@ -105,6 +105,6 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} static inline void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {} -#endif +#endif /* CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_MACHINE_H */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 927ad6451105..9918a6c910c5 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -47,6 +47,7 @@ enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_AVI = 0x82, HDMI_INFOFRAME_TYPE_SPD = 0x83, HDMI_INFOFRAME_TYPE_AUDIO = 0x84, + HDMI_INFOFRAME_TYPE_DRM = 0x87, }; #define HDMI_IEEE_OUI 0x000c03 @@ -55,6 +56,7 @@ enum hdmi_infoframe_type { #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 +#define HDMI_DRM_INFOFRAME_SIZE 26 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) @@ -152,6 +154,17 @@ enum hdmi_content_type { HDMI_CONTENT_TYPE_GAME, }; +enum hdmi_metadata_type { + HDMI_STATIC_METADATA_TYPE1 = 1, +}; + +enum hdmi_eotf { + HDMI_EOTF_TRADITIONAL_GAMMA_SDR, + HDMI_EOTF_TRADITIONAL_GAMMA_HDR, + HDMI_EOTF_SMPTE_ST2084, + HDMI_EOTF_BT_2100_HLG, +}; + struct hdmi_avi_infoframe { enum hdmi_infoframe_type type; unsigned char version; @@ -175,12 +188,37 @@ struct hdmi_avi_infoframe { unsigned short right_bar; }; +/* DRM Infoframe as per CTA 861.G spec */ +struct hdmi_drm_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_eotf eotf; + enum hdmi_metadata_type metadata_type; + struct { + u16 x, y; + } display_primaries[3]; + struct { + u16 x, y; + } white_point; + u16 max_display_mastering_luminance; + u16 min_display_mastering_luminance; + u16 max_cll; + u16 max_fall; +}; + int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, void *buffer, size_t size); int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); +int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame); +ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, + size_t size); +ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, + void *buffer, size_t size); +int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, @@ -320,6 +358,33 @@ struct hdmi_vendor_infoframe { unsigned int s3d_ext_data; }; +/* HDR Metadata as per 861.G spec */ +struct hdr_static_metadata { + __u8 eotf; + __u8 metadata_type; + __u16 max_cll; + __u16 max_fall; + __u16 min_cll; +}; + +/** + * struct hdr_sink_metadata - HDR sink metadata + * + * Metadata Information read from Sink's EDID + */ +struct hdr_sink_metadata { + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u32 metadata_type; + /** + * @hdmi_type1: HDR Metadata Infoframe. 
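
Editor's note on the hdmi.h additions above (HDMI_INFOFRAME_TYPE_DRM, struct hdmi_drm_infoframe and its init/check/pack helpers): a minimal sketch of how a display driver might fill and pack a Dynamic Range and Mastering infoframe. The luminance and content-light values are illustrative only, and error handling is trimmed to the essentials:

    #include <linux/hdmi.h>

    static ssize_t example_pack_drm_infoframe(void *buf, size_t size)
    {
            struct hdmi_drm_infoframe frame;
            int ret;

            ret = hdmi_drm_infoframe_init(&frame);
            if (ret)
                    return ret;

            frame.eotf = HDMI_EOTF_SMPTE_ST2084;            /* PQ curve */
            frame.metadata_type = HDMI_STATIC_METADATA_TYPE1;
            frame.max_display_mastering_luminance = 1000;   /* illustrative */
            frame.min_display_mastering_luminance = 50;
            frame.max_cll = 1000;
            frame.max_fall = 400;

            /* Packs header + HDMI_DRM_INFOFRAME_SIZE payload bytes into buf. */
            return hdmi_drm_infoframe_pack(&frame, buf, size);
    }
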
+ */ + union { + struct hdr_static_metadata hdmi_type1; + }; +}; + int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); @@ -344,6 +409,7 @@ union hdmi_vendor_any_infoframe { * @spd: spd infoframe * @vendor: union of all vendor infoframes * @audio: audio infoframe + * @drm: Dynamic Range and Mastering infoframe * * This is used by the generic pack function. This works since all infoframes * have the same header which also indicates which type of infoframe should be @@ -355,6 +421,7 @@ union hdmi_infoframe { struct hdmi_spd_infoframe spd; union hdmi_vendor_any_infoframe vendor; struct hdmi_audio_infoframe audio; + struct hdmi_drm_infoframe drm; }; ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 044a36d7c3f8..7ef56dc18050 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -21,8 +21,8 @@ * * HMM address space mirroring API: * - * Use HMM address space mirroring if you want to mirror range of the CPU page - * table of a process into a device page table. Here, "mirror" means "keep + * Use HMM address space mirroring if you want to mirror a range of the CPU + * page tables of a process into a device page table. Here, "mirror" means "keep * synchronized". Prerequisites: the device must provide the ability to write- * protect its page tables (at PAGE_SIZE granularity), and must be able to * recover from the resulting potential page faults. @@ -62,7 +62,7 @@ #include <linux/kconfig.h> #include <asm/pgtable.h> -#if IS_ENABLED(CONFIG_HMM) +#ifdef CONFIG_HMM_MIRROR #include <linux/device.h> #include <linux/migrate.h> @@ -82,19 +82,18 @@ * @mirrors_sem: read/write semaphore protecting the mirrors list * @wq: wait queue for user waiting on a range invalidation * @notifiers: count of active mmu notifiers - * @dead: is the mm dead ? */ struct hmm { struct mm_struct *mm; struct kref kref; - struct mutex lock; + spinlock_t ranges_lock; struct list_head ranges; struct list_head mirrors; struct mmu_notifier mmu_notifier; struct rw_semaphore mirrors_sem; wait_queue_head_t wq; + struct rcu_head rcu; long notifiers; - bool dead; }; /* @@ -105,10 +104,11 @@ struct hmm { * HMM_PFN_WRITE: CPU page table has write permission set * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) * - * The driver provide a flags array, if driver valid bit for an entry is bit - * 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide + * The driver provides a flags array for mapping page protections to device + * PTE bits. If the driver valid bit for an entry is bit 3, + * i.e., (entry & (1 << 3)), then the driver must provide * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. - * Same logic apply to all flags. This is same idea as vm_page_prot in vma + * Same logic apply to all flags. This is the same idea as vm_page_prot in vma * except that this is per device driver rather than per architecture. */ enum hmm_pfn_flag_e { @@ -129,13 +129,13 @@ enum hmm_pfn_flag_e { * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. * - * Driver provide entry value for none entry, error entry and special entry, - * driver can alias (ie use same value for error and special for instance). It - * should not alias none and error or special. + * Driver provides values for none entry, error entry, and special entry. 
+ * Driver can alias (i.e., use same value) error and special, but + * it should not alias none with error or special. * * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, - * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table + * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one */ enum hmm_pfn_value_e { @@ -158,6 +158,7 @@ enum hmm_pfn_value_e { * @values: pfn value for some special case (none, special, error, ...) * @default_flags: default flags for the range (write, read, ... see hmm doc) * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter + * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT) * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) * @valid: pfns array did not change since it has been fill by an HMM function */ @@ -180,7 +181,7 @@ struct hmm_range { /* * hmm_range_page_shift() - return the page shift for the range * @range: range being queried - * Returns: page shift (page size = 1 << page shift) for the range + * Return: page shift (page size = 1 << page shift) for the range */ static inline unsigned hmm_range_page_shift(const struct hmm_range *range) { @@ -190,7 +191,7 @@ static inline unsigned hmm_range_page_shift(const struct hmm_range *range) /* * hmm_range_page_size() - return the page size for the range * @range: range being queried - * Returns: page size for the range in bytes + * Return: page size for the range in bytes */ static inline unsigned long hmm_range_page_size(const struct hmm_range *range) { @@ -201,28 +202,19 @@ static inline unsigned long hmm_range_page_size(const struct hmm_range *range) * hmm_range_wait_until_valid() - wait for range to be valid * @range: range affected by invalidation to wait on * @timeout: time out for wait in ms (ie abort wait after that period of time) - * Returns: true if the range is valid, false otherwise. + * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_wait_until_valid(struct hmm_range *range, unsigned long timeout) { - /* Check if mm is dead ? */ - if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) { - range->valid = false; - return false; - } - if (range->valid) - return true; - wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead, - msecs_to_jiffies(timeout)); - /* Return current valid status just in case we get lucky */ - return range->valid; + return wait_event_timeout(range->hmm->wq, range->valid, + msecs_to_jiffies(timeout)) != 0; } /* * hmm_range_valid() - test if a range is valid or not * @range: range - * Returns: true if the range is valid, false otherwise. + * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_valid(struct hmm_range *range) { @@ -233,7 +225,7 @@ static inline bool hmm_range_valid(struct hmm_range *range) * hmm_device_entry_to_page() - return struct page pointed to by a device entry * @range: range use to decode device entry value * @entry: device entry value to get corresponding struct page from - * Returns: struct page pointer if entry is a valid, NULL otherwise + * Return: struct page pointer if entry is a valid, NULL otherwise * * If the device entry is valid (ie valid flag set) then return the struct page * matching the entry value. Otherwise return NULL. 
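
Editor's note on the reworked hmm.h comments above: the driver supplies hmm_range.flags[] and hmm_range.values[] arrays that translate HMM's abstract pfn flags into the device's own PTE encoding. A short sketch of such a mapping for a hypothetical device whose valid and write bits are bits 3 and 4, matching the bit-3 example in the comment; all EXDEV_* names are invented for illustration:

    #include <linux/hmm.h>

    /* Hypothetical device PTE encoding. */
    #define EXDEV_PTE_VALID    (1ULL << 3)
    #define EXDEV_PTE_WRITE    (1ULL << 4)
    #define EXDEV_PTE_ERROR    (1ULL << 62)
    #define EXDEV_PTE_SPECIAL  (1ULL << 63)

    static const uint64_t exdev_hmm_flags[HMM_PFN_FLAG_MAX] = {
            [HMM_PFN_VALID]          = EXDEV_PTE_VALID,
            [HMM_PFN_WRITE]          = EXDEV_PTE_WRITE,
            [HMM_PFN_DEVICE_PRIVATE] = 0,
    };

    static const uint64_t exdev_hmm_values[HMM_PFN_VALUE_MAX] = {
            [HMM_PFN_ERROR]   = EXDEV_PTE_ERROR,
            [HMM_PFN_NONE]    = 0,          /* must not alias error/special */
            [HMM_PFN_SPECIAL] = EXDEV_PTE_SPECIAL,
    };

    /* The driver points range->flags / range->values at these tables before
     * calling hmm_range_fault(), which then fills range->pfns[] using them. */
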
@@ -256,7 +248,7 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang * hmm_device_entry_to_pfn() - return pfn value store in a device entry * @range: range use to decode device entry value * @entry: device entry to extract pfn from - * Returns: pfn value if device entry is valid, -1UL otherwise + * Return: pfn value if device entry is valid, -1UL otherwise */ static inline unsigned long hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) @@ -276,7 +268,7 @@ hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) * hmm_device_entry_from_page() - create a valid device entry for a page * @range: range use to encode HMM pfn value * @page: page for which to create the device entry - * Returns: valid device entry for the page + * Return: valid device entry for the page */ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, struct page *page) @@ -289,7 +281,7 @@ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, * hmm_device_entry_from_pfn() - create a valid device entry value from pfn * @range: range use to encode HMM pfn value * @pfn: pfn value for which to create the device entry - * Returns: valid device entry for the pfn + * Return: valid device entry for the pfn */ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, unsigned long pfn) @@ -332,9 +324,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, return hmm_device_entry_from_pfn(range, pfn); } - - -#if IS_ENABLED(CONFIG_HMM_MIRROR) /* * Mirroring: how to synchronize device page table with CPU page table. * @@ -394,7 +383,7 @@ enum hmm_update_event { }; /* - * struct hmm_update - HMM update informations for callback + * struct hmm_update - HMM update information for callback * * @start: virtual start address of the range to update * @end: virtual end address of the range to update @@ -418,17 +407,18 @@ struct hmm_mirror_ops { * * @mirror: pointer to struct hmm_mirror * - * This is called when the mm_struct is being released. - * The callback should make sure no references to the mirror occur - * after the callback returns. + * This is called when the mm_struct is being released. The callback + * must ensure that all access to any pages obtained from this mirror + * is halted before the callback returns. All future access should + * fault. */ void (*release)(struct hmm_mirror *mirror); /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror - * @update: update informations (see struct hmm_update) - * Returns: -EAGAIN if update.blockable false and callback need to + * @update: update information (see struct hmm_update) + * Return: -EAGAIN if update.blockable false and callback need to * block, 0 otherwise. * * This callback ultimately originates from mmu_notifiers when the CPU @@ -465,35 +455,10 @@ int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); void hmm_mirror_unregister(struct hmm_mirror *mirror); /* - * hmm_mirror_mm_is_alive() - test if mm is still alive - * @mirror: the HMM mm mirror for which we want to lock the mmap_sem - * Returns: false if the mm is dead, true otherwise - * - * This is an optimization it will not accurately always return -EINVAL if the - * mm is dead ie there can be false negative (process is being kill but HMM is - * not yet inform of that). 
It is only intented to be use to optimize out case - * where driver is about to do something time consuming and it would be better - * to skip it if the mm is dead. - */ -static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror) -{ - struct mm_struct *mm; - - if (!mirror || !mirror->hmm) - return false; - mm = READ_ONCE(mirror->hmm->mm); - if (mirror->hmm->dead || !mm) - return false; - - return true; -} - - -/* * Please see Documentation/vm/hmm.rst for how to use the range API. */ int hmm_range_register(struct hmm_range *range, - struct mm_struct *mm, + struct hmm_mirror *mirror, unsigned long start, unsigned long end, unsigned page_shift); @@ -519,262 +484,13 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline bool hmm_vma_range_done(struct hmm_range *range) -{ - bool ret = hmm_range_valid(range); - - hmm_range_unregister(range); - return ret; -} - -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline int hmm_vma_fault(struct hmm_range *range, bool block) -{ - long ret; - - /* - * With the old API the driver must set each individual entries with - * the requested flags (valid, write, ...). So here we set the mask to - * keep intact the entries provided by the driver and zero out the - * default_flags. - */ - range->default_flags = 0; - range->pfn_flags_mask = -1UL; - - ret = hmm_range_register(range, range->vma->vm_mm, - range->start, range->end, - PAGE_SHIFT); - if (ret) - return (int)ret; - - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - /* - * The mmap_sem was taken by driver we release it here and - * returns -EAGAIN which correspond to mmap_sem have been - * drop in the old API. - */ - up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; - } - - ret = hmm_range_fault(range, block); - if (ret <= 0) { - if (ret == -EBUSY || !ret) { - /* Same as above drop mmap_sem to match old API. */ - up_read(&range->vma->vm_mm->mmap_sem); - ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; - hmm_range_unregister(range); - return ret; - } - return 0; -} - /* Below are for HMM internal use only! Not to be used by device driver! */ -void hmm_mm_destroy(struct mm_struct *mm); - static inline void hmm_mm_init(struct mm_struct *mm) { mm->hmm = NULL; } #else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} static inline void hmm_mm_init(struct mm_struct *mm) {} #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) -struct hmm_devmem; - -struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, - unsigned long addr); - -/* - * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events - * - * @free: call when refcount on page reach 1 and thus is no longer use - * @fault: call when there is a page fault to unaddressable memory - * - * Both callback happens from page_free() and page_fault() callback of struct - * dev_pagemap respectively. See include/linux/memremap.h for more details on - * those. - * - * The hmm_devmem_ops callback are just here to provide a coherent and - * uniq API to device driver and device driver should not register their - * own page_free() or page_fault() but rely on the hmm_devmem_ops call- - * back. 
- */ -struct hmm_devmem_ops { - /* - * free() - free a device page - * @devmem: device memory structure (see struct hmm_devmem) - * @page: pointer to struct page being freed - * - * Call back occurs whenever a device page refcount reach 1 which - * means that no one is holding any reference on the page anymore - * (ZONE_DEVICE page have an elevated refcount of 1 as default so - * that they are not release to the general page allocator). - * - * Note that callback has exclusive ownership of the page (as no - * one is holding any reference). - */ - void (*free)(struct hmm_devmem *devmem, struct page *page); - /* - * fault() - CPU page fault or get user page (GUP) - * @devmem: device memory structure (see struct hmm_devmem) - * @vma: virtual memory area containing the virtual address - * @addr: virtual address that faulted or for which there is a GUP - * @page: pointer to struct page backing virtual address (unreliable) - * @flags: FAULT_FLAG_* (see include/linux/mm.h) - * @pmdp: page middle directory - * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR - * on error - * - * The callback occurs whenever there is a CPU page fault or GUP on a - * virtual address. This means that the device driver must migrate the - * page back to regular memory (CPU accessible). - * - * The device driver is free to migrate more than one page from the - * fault() callback as an optimization. However if device decide to - * migrate more than one page it must always priotirize the faulting - * address over the others. - * - * The struct page pointer is only given as an hint to allow quick - * lookup of internal device driver data. A concurrent migration - * might have already free that page and the virtual address might - * not longer be back by it. So it should not be modified by the - * callback. - * - * Note that mmap semaphore is held in read mode at least when this - * callback occurs, hence the vma is valid upon callback entry. - */ - vm_fault_t (*fault)(struct hmm_devmem *devmem, - struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp); -}; - -/* - * struct hmm_devmem - track device memory - * - * @completion: completion object for device memory - * @pfn_first: first pfn for this resource (set by hmm_devmem_add()) - * @pfn_last: last pfn for this resource (set by hmm_devmem_add()) - * @resource: IO resource reserved for this chunk of memory - * @pagemap: device page map for that chunk - * @device: device to bind resource to - * @ops: memory operations callback - * @ref: per CPU refcount - * @page_fault: callback when CPU fault on an unaddressable device page - * - * This an helper structure for device drivers that do not wish to implement - * the gory details related to hotplugging new memoy and allocating struct - * pages. - * - * Device drivers can directly use ZONE_DEVICE memory on their own if they - * wish to do so. - * - * The page_fault() callback must migrate page back, from device memory to - * system memory, so that the CPU can access it. This might fail for various - * reasons (device issues, device have been unplugged, ...). When such error - * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and - * set the CPU page table entry to "poisoned". - * - * Note that because memory cgroup charges are transferred to the device memory, - * this should never fail due to memory restrictions. However, allocation - * of a regular system page might still fail because we are out of memory. 
If - * that happens, the page_fault() callback must return VM_FAULT_OOM. - * - * The page_fault() callback can also try to migrate back multiple pages in one - * chunk, as an optimization. It must, however, prioritize the faulting address - * over all the others. - */ -typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp); - -struct hmm_devmem { - struct completion completion; - unsigned long pfn_first; - unsigned long pfn_last; - struct resource *resource; - struct device *device; - struct dev_pagemap pagemap; - const struct hmm_devmem_ops *ops; - struct percpu_ref ref; - dev_page_fault_t page_fault; -}; - -/* - * To add (hotplug) device memory, HMM assumes that there is no real resource - * that reserves a range in the physical address space (this is intended to be - * use by unaddressable device memory). It will reserve a physical range big - * enough and allocate struct page for it. - * - * The device driver can wrap the hmm_devmem struct inside a private device - * driver struct. - */ -struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, - struct device *device, - unsigned long size); -struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, - struct device *device, - struct resource *res); - -/* - * hmm_devmem_page_set_drvdata - set per-page driver data field - * - * @page: pointer to struct page - * @data: driver data value to set - * - * Because page can not be on lru we have an unsigned long that driver can use - * to store a per page field. This just a simple helper to do that. - */ -static inline void hmm_devmem_page_set_drvdata(struct page *page, - unsigned long data) -{ - page->hmm_data = data; -} - -/* - * hmm_devmem_page_get_drvdata - get per page driver data field - * - * @page: pointer to struct page - * Return: driver data value - */ -static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page) -{ - return page->hmm_data; -} - - -/* - * struct hmm_device - fake device to hang device memory onto - * - * @device: device struct - * @minor: device minor number - */ -struct hmm_device { - struct device device; - unsigned int minor; -}; - -/* - * A device driver that wants to handle multiple devices memory through a - * single fake device can use hmm_device to do so. This is purely a helper and - * it is not strictly needed, in order to make use of any HMM functionality. 
- */ -struct hmm_device *hmm_device_new(void *drvdata); -void hmm_device_put(struct hmm_device *hmm_device); -#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -#else /* IS_ENABLED(CONFIG_HMM) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} -static inline void hmm_mm_init(struct mm_struct *mm) {} -#endif /* IS_ENABLED(CONFIG_HMM) */ - #endif /* LINUX_HMM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index cfff30b9a62e..e6eea45e1154 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -297,6 +297,8 @@ struct host1x_device { struct list_head clients; bool registered; + + struct device_dma_parameters dma_parms; }; static inline struct host1x_device *to_host1x_device(struct device *dev) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2e8957eac4d4..4971100a8cab 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -12,8 +12,8 @@ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H +#include <linux/hrtimer_defs.h> #include <linux/rbtree.h> -#include <linux/ktime.h> #include <linux/init.h> #include <linux/list.h> #include <linux/percpu.h> @@ -298,26 +298,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -/* - * The resolution of the clocks. The resolution value is returned in - * the clock_getres() system call to give application programmers an - * idea of the (in)accuracy of timers. Timer values are rounded up to - * this resolution values. - */ -# define HIGH_RES_NSEC 1 -# define KTIME_HIGH_RES (HIGH_RES_NSEC) -# define MONOTONIC_RES_NSEC HIGH_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_HIGH_RES - extern void clock_was_set_delayed(void); extern unsigned int hrtimer_resolution; #else -# define MONOTONIC_RES_NSEC LOW_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_LOW_RES - #define hrtimer_resolution (unsigned int)LOW_RES_NSEC static inline void clock_was_set_delayed(void) { } diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h new file mode 100644 index 000000000000..2d3e3c5fb946 --- /dev/null +++ b/include/linux/hrtimer_defs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HRTIMER_DEFS_H +#define _LINUX_HRTIMER_DEFS_H + +#include <linux/ktime.h> + +#ifdef CONFIG_HIGH_RES_TIMERS + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. 
+ */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (HIGH_RES_NSEC) +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +#else + +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + +#endif + +#endif diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7cd5c150c21d..45ede62aa85b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -121,6 +121,23 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) bool transparent_hugepage_enabled(struct vm_area_struct *vma); +#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) + +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + /* Don't have to check pgoff for anonymous vma */ + if (!vma_is_anonymous(vma)) { + if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != + (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) + return false; + } + + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + return false; + return true; +} + #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) @@ -271,6 +288,12 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + return false; +} + static inline void prep_transhuge_page(struct page *page) {} #define transparent_hugepage_flags 0UL diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index edf476c8cfb9..edfca4278319 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -16,29 +16,11 @@ struct user_struct; struct mmu_gather; #ifndef is_hugepd -/* - * Some architectures requires a hugepage directory format that is - * required to support multiple hugepage sizes. For example - * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" - * introduced the same on powerpc. This allows for a more flexible hugepage - * pagetable layout. 
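
Editor's note on the huge_mm.h hunk above: the new transhuge_vma_suitable() refuses a PMD mapping for a file-backed VMA unless the VMA start and the file offset agree modulo the huge-page size. A small userspace illustration of that congruence test, assuming x86-64 constants (4 KiB pages, 2 MiB PMD huge pages) and made-up addresses:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define HPAGE_PMD_NR 512                        /* 2 MiB / 4 KiB */
    #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

    int main(void)
    {
            unsigned long vm_start = 0x7f0000201000UL;  /* hypothetical VMA start */
            unsigned long vm_pgoff = 1;                 /* file offset in pages */

            /* Suitable only if VMA start and file offset are congruent modulo
             * the huge-page size, so one PMD can map file-aligned content. */
            bool ok = ((vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) ==
                      (vm_pgoff & HPAGE_CACHE_INDEX_MASK);
            printf("PMD-mappable: %s\n", ok ? "yes" : "no");
            return 0;
    }
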
- */ typedef struct { unsigned long pd; } hugepd_t; #define is_hugepd(hugepd) (0) #define __hugepd(x) ((hugepd_t) { (x) }) -static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, - unsigned pdshift, unsigned long end, - int write, struct page **pages, int *nr) -{ - return 0; -} -#else -extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, - unsigned pdshift, unsigned long end, - int write, struct page **pages, int *nr); #endif - #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> @@ -608,22 +590,92 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; -#define alloc_huge_page(v, a, r) NULL -#define alloc_huge_page_node(h, nid) NULL -#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL -#define alloc_huge_page_vma(h, vma, address) NULL -#define alloc_bootmem_huge_page(h) NULL -#define hstate_file(f) NULL -#define hstate_sizelog(s) NULL -#define hstate_vma(v) NULL -#define hstate_inode(i) NULL -#define page_hstate(page) NULL -#define huge_page_size(h) PAGE_SIZE -#define huge_page_mask(h) PAGE_MASK -#define vma_kernel_pagesize(v) PAGE_SIZE -#define vma_mmu_pagesize(v) PAGE_SIZE -#define huge_page_order(h) 0 -#define huge_page_shift(h) PAGE_SHIFT + +static inline struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, + int avoid_reserve) +{ + return NULL; +} + +static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) +{ + return NULL; +} + +static inline struct page * +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +{ + return NULL; +} + +static inline struct page *alloc_huge_page_vma(struct hstate *h, + struct vm_area_struct *vma, + unsigned long address) +{ + return NULL; +} + +static inline int __alloc_bootmem_huge_page(struct hstate *h) +{ + return 0; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return NULL; +} + +static inline struct hstate *hstate_sizelog(int page_size_log) +{ + return NULL; +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return NULL; +} + +static inline struct hstate *hstate_inode(struct inode *i) +{ + return NULL; +} + +static inline struct hstate *page_hstate(struct page *page) +{ + return NULL; +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return PAGE_SIZE; +} + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return PAGE_MASK; +} + +static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) +{ + return PAGE_SIZE; +} + +static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ + return PAGE_SIZE; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return 0; +} + +static inline unsigned int huge_page_shift(struct hstate *h) +{ + return PAGE_SHIFT; +} + static inline bool hstate_is_gigantic(struct hstate *h) { return false; diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index c0b93e0ff0c0..8e6dd908da21 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -1,7 +1,7 @@ /* Hardware Random Number Generator - Please read Documentation/hw_random.txt for details on use. + Please read Documentation/admin-guide/hw_random.rst for details on use. 
---------------------------------------------------------- This software may be used and distributed according to the terms diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index 0afe693be5f4..bfe7c1f1ac6d 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -14,9 +14,10 @@ #include <linux/sched.h> /* hwspinlock mode argument */ -#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ -#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ -#define HWLOCK_RAW 0x03 +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ +#define HWLOCK_RAW 0x03 +#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */ struct device; struct device_node; @@ -223,6 +224,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock) } /** + * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * This function shall be called only from an atomic context. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_trylock() - attempt to lock a specific hwspinlock * @hwlock: an hwspinlock which we want to trylock * @@ -313,6 +331,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to) } /** + * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * This function shall be called only from an atomic context and the timeout + * value shall not exceed a few msecs. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. + */ +static inline +int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_lock_timeout() - lock an hwspinlock with timeout limit * @hwlock: the hwspinlock to be locked * @to: timeout value in msecs @@ -387,6 +427,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock) } /** + * hwspin_unlock_in_atomic() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock. + * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. 
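
Editor's note on the hwspinlock.h additions in this hunk: the HWLOCK_IN_ATOMIC mode and its *_in_atomic() wrappers are for callers that are already in atomic context; per the kernel-doc, they never sleep and expect timeouts of only a few msecs. A usage sketch under that assumption; the lock handle and register access are hypothetical:

    #include <linux/hwspinlock.h>
    #include <linux/io.h>

    static int example_poke_shared_reg(struct hwspinlock *hwlock,
                                       void __iomem *reg, u32 val)
    {
            int ret;

            /* Caller is already atomic (e.g. under a spinlock), so busy-wait
             * briefly with the _in_atomic variant rather than the sleeping
             * or IRQ-state-saving modes. */
            ret = hwspin_lock_timeout_in_atomic(hwlock, 5 /* ms */);
            if (ret)
                    return ret;

            writel(val, reg);

            hwspin_unlock_in_atomic(hwlock);
            return 0;
    }
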
+ */ +static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_unlock() - unlock hwspinlock * @hwlock: a previously-acquired hwspinlock which we want to unlock * diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 1308126fc384..fa5552c2307b 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -1,19 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* ------------------------------------------------------------------------- */ -/* */ -/* i2c.h - definitions for the i2c-bus interface */ -/* */ -/* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-2000 Simon G. Vogl - +/* + * i2c.h - definitions for the Linux i2c bus interface + * Copyright (C) 1995-2000 Simon G. Vogl + * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de> + * + * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and + * Frodo Looijaard <frodol@dds.nl> */ -/* ------------------------------------------------------------------------- */ - -/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and - Frodo Looijaard <frodol@dds.nl> */ #ifndef _LINUX_I2C_H #define _LINUX_I2C_H +#include <linux/acpi.h> /* for acpi_handle */ #include <linux/mod_devicetable.h> #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ @@ -39,7 +36,8 @@ struct i2c_device_identity; union i2c_smbus_data; struct i2c_board_info; enum i2c_slave_event; -typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); +typedef int (*i2c_slave_cb_t)(struct i2c_client *client, + enum i2c_slave_event event, u8 *val); struct module; struct property_entry; @@ -256,16 +254,16 @@ struct i2c_driver { unsigned int class; /* Standard driver model interfaces */ - int (*probe)(struct i2c_client *, const struct i2c_device_id *); - int (*remove)(struct i2c_client *); + int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); + int (*remove)(struct i2c_client *client); /* New driver model interface to aid the seamless removal of the * current probe()'s, more commonly unused than used second parameter. */ - int (*probe_new)(struct i2c_client *); + int (*probe_new)(struct i2c_client *client); /* driver model interfaces that don't relate to enumeration */ - void (*shutdown)(struct i2c_client *); + void (*shutdown)(struct i2c_client *client); /* Alert callback, for example for the SMBus alert protocol. * The format and meaning of the data value depends on the protocol. @@ -274,7 +272,7 @@ struct i2c_driver { * For the SMBus Host Notify protocol, the data corresponds to the * 16-bit payload data reported by the slave device acting as master. 
*/ - void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, + void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol, unsigned int data); /* a ioctl like command that can be used to perform specific functions @@ -286,7 +284,7 @@ struct i2c_driver { const struct i2c_device_id *id_table; /* Device detection callback for automatic device creation */ - int (*detect)(struct i2c_client *, struct i2c_board_info *); + int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; @@ -296,8 +294,7 @@ struct i2c_driver { /** * struct i2c_client - represent an I2C slave device - * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; - * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking + * @flags: see I2C_CLIENT_* for possible flags * @addr: Address used on the I2C bus connected to the parent adapter. * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. @@ -315,6 +312,15 @@ struct i2c_driver { */ struct i2c_client { unsigned short flags; /* div., see below */ +#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ +#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ + /* Must equal I2C_M_TEN below */ +#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ +#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ +#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ +#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ + /* Must match I2C_M_STOP|IGNORE_NAK */ + unsigned short addr; /* chip address - NOTE: 7bit */ /* addresses are stored in the */ /* _LOWER_ 7 bits */ @@ -436,6 +442,9 @@ struct i2c_board_info { extern struct i2c_client * i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); +extern struct i2c_client * +i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); + /* If you don't know the exact address of an I2C device, use this variant * instead, which can probe for device presence in a list of possible * addresses. The "probe" callback function is optional. If it is provided, @@ -446,10 +455,10 @@ extern struct i2c_client * i2c_new_probed_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *, unsigned short addr)); + int (*probe)(struct i2c_adapter *adap, unsigned short addr)); /* Common custom probe functions */ -extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); +extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr); /* For devices that use several addresses, use i2c_new_dummy() to make * client handles for the extra addresses. @@ -458,6 +467,9 @@ extern struct i2c_client * i2c_new_dummy(struct i2c_adapter *adap, u16 address); extern struct i2c_client * +i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address); + +extern struct i2c_client * devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address); extern struct i2c_client * @@ -465,7 +477,7 @@ i2c_new_secondary_device(struct i2c_client *client, const char *name, u16 default_addr); -extern void i2c_unregister_device(struct i2c_client *); +extern void i2c_unregister_device(struct i2c_client *client); #endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. 
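
Editor's note on the i2c.h hunk above: alongside the parameter-naming cleanups, it adds i2c_new_client_device() and i2c_new_dummy_device() next to the older i2c_new_device()/i2c_new_dummy(); the newer variants are generally described as returning ERR_PTR-encoded errors rather than NULL, which the sketch below assumes. A hypothetical driver claiming a second address of a multi-address chip; 0x51 is illustrative only:

    #include <linux/i2c.h>
    #include <linux/err.h>

    static int example_claim_second_address(struct i2c_client *client)
    {
            struct i2c_client *bank2;

            bank2 = i2c_new_dummy_device(client->adapter, 0x51);
            if (IS_ERR(bank2))
                    return PTR_ERR(bank2);  /* new API: ERR_PTR, not NULL */

            i2c_set_clientdata(client, bank2);
            return 0;
    }

The extra handle would later be released with i2c_unregister_device() from the driver's remove path.
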
@@ -550,9 +562,9 @@ struct i2c_algorithm { * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus. */ struct i2c_lock_operations { - void (*lock_bus)(struct i2c_adapter *, unsigned int flags); - int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); - void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); + void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags); }; /** @@ -702,14 +714,14 @@ struct i2c_adapter { }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) +static inline void *i2c_get_adapdata(const struct i2c_adapter *adap) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&adap->dev); } -static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) +static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&adap->dev, data); } static inline struct i2c_adapter * @@ -725,7 +737,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) return NULL; } -int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); +int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data)); /* Adapter locking functions, exported for shared pin cases */ #define I2C_LOCK_ROOT_ADAPTER BIT(0) @@ -801,16 +813,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); } -/*flags for the client struct: */ -#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ -#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ - /* Must equal I2C_M_TEN below */ -#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ -#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ -#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ -#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ - /* Must match I2C_M_STOP|IGNORE_NAK */ - /* i2c adapter classes (bitmask) */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ @@ -831,12 +833,12 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) /* administration... 
*/ #if IS_ENABLED(CONFIG_I2C) -extern int i2c_add_adapter(struct i2c_adapter *); -extern void i2c_del_adapter(struct i2c_adapter *); -extern int i2c_add_numbered_adapter(struct i2c_adapter *); +extern int i2c_add_adapter(struct i2c_adapter *adap); +extern void i2c_del_adapter(struct i2c_adapter *adap); +extern int i2c_add_numbered_adapter(struct i2c_adapter *adap); -extern int i2c_register_driver(struct module *, struct i2c_driver *); -extern void i2c_del_driver(struct i2c_driver *); +extern int i2c_register_driver(struct module *owner, struct i2c_driver *driver); +extern void i2c_del_driver(struct i2c_driver *driver); /* use a define to avoid include chaining to get THIS_MODULE */ #define i2c_add_driver(driver) \ @@ -981,6 +983,7 @@ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info); +struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle); #else static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) @@ -996,6 +999,10 @@ static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, { return NULL; } +static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) +{ + return NULL; +} #endif /* CONFIG_ACPI */ #endif /* _LINUX_I2C_H */ diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index f13fd8b1dd79..1f08fa8d69d2 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -48,7 +48,7 @@ struct i3c_i2c_dev_desc { #define I3C_LVR_I2C_INDEX(x) ((x) << 5) #define I3C_LVR_I2C_FM_MODE BIT(4) -#define I2C_MAX_ADDR GENMASK(9, 0) +#define I2C_MAX_ADDR GENMASK(6, 0) /** * struct i2c_dev_boardinfo - I2C device board information @@ -250,12 +250,17 @@ struct i3c_device { * the bus. The only impact in this mode is that the * high SCL pulse has to stay below 50ns to trick I2C * devices when transmitting I3C frames + * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are + * present on the bus. However they allow + * compliance up to the maximum SDR SCL clock + * frequency. * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present * on the bus */ enum i3c_bus_mode { I3C_BUS_MODE_PURE, I3C_BUS_MODE_MIXED_FAST, + I3C_BUS_MODE_MIXED_LIMITED, I3C_BUS_MODE_MIXED_SLOW, }; @@ -390,8 +395,6 @@ struct i3c_bus { * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C * framework. * This method is mandatory. - * @i2c_funcs: expose the supported I2C functionalities. - * This method is mandatory. * @request_ibi: attach an IBI handler to an I3C device. This implies defining * an IBI handler and the constraints of the IBI (maximum payload * length and number of pre-allocated slots). 
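
Editor's note on the i3c/master.h change of I2C_MAX_ADDR from GENMASK(9, 0) to GENMASK(6, 0): this narrows the address-validity check from the 10-bit to the 7-bit I2C address space, presumably because I2C devices on an I3C bus are limited to 7-bit addressing. A tiny userspace illustration of the two masks, using a simplified GENMASK that assumes a 64-bit unsigned long:

    #include <stdio.h>

    /* Simplified GENMASK(h, l): bits l..h inclusive, 64-bit long assumed. */
    #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

    int main(void)
    {
            printf("GENMASK(9, 0) = 0x%lx\n", GENMASK(9, 0)); /* 0x3ff: 10-bit */
            printf("GENMASK(6, 0) = 0x%lx\n", GENMASK(6, 0)); /* 0x7f:   7-bit */
            return 0;
    }
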
@@ -437,7 +440,6 @@ struct i3c_master_controller_ops { void (*detach_i2c_dev)(struct i2c_dev_desc *dev); int (*i2c_xfers)(struct i2c_dev_desc *dev, const struct i2c_msg *xfers, int nxfers); - u32 (*i2c_funcs)(struct i3c_master_controller *master); int (*request_ibi)(struct i3c_dev_desc *dev, const struct i3c_ibi_setup *req); void (*free_ibi)(struct i3c_dev_desc *dev); diff --git a/include/linux/ide.h b/include/linux/ide.h index 971cf76a78a0..46b771d6999e 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -253,9 +253,9 @@ static inline void ide_std_init_ports(struct ide_hw *hw, * Special Driver Flags */ enum { - IDE_SFLAG_SET_GEOMETRY = (1 << 0), - IDE_SFLAG_RECALIBRATE = (1 << 1), - IDE_SFLAG_SET_MULTMODE = (1 << 2), + IDE_SFLAG_SET_GEOMETRY = BIT(0), + IDE_SFLAG_RECALIBRATE = BIT(1), + IDE_SFLAG_SET_MULTMODE = BIT(2), }; /* @@ -267,13 +267,13 @@ typedef enum { } ide_startstop_t; enum { - IDE_VALID_ERROR = (1 << 1), + IDE_VALID_ERROR = BIT(1), IDE_VALID_FEATURE = IDE_VALID_ERROR, - IDE_VALID_NSECT = (1 << 2), - IDE_VALID_LBAL = (1 << 3), - IDE_VALID_LBAM = (1 << 4), - IDE_VALID_LBAH = (1 << 5), - IDE_VALID_DEVICE = (1 << 6), + IDE_VALID_NSECT = BIT(2), + IDE_VALID_LBAL = BIT(3), + IDE_VALID_LBAM = BIT(4), + IDE_VALID_LBAH = BIT(5), + IDE_VALID_DEVICE = BIT(6), IDE_VALID_LBA = IDE_VALID_LBAL | IDE_VALID_LBAM | IDE_VALID_LBAH, @@ -289,24 +289,24 @@ enum { }; enum { - IDE_TFLAG_LBA48 = (1 << 0), - IDE_TFLAG_WRITE = (1 << 1), - IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), - IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), + IDE_TFLAG_LBA48 = BIT(0), + IDE_TFLAG_WRITE = BIT(1), + IDE_TFLAG_CUSTOM_HANDLER = BIT(2), + IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3), /* force 16-bit I/O operations */ - IDE_TFLAG_IO_16BIT = (1 << 4), + IDE_TFLAG_IO_16BIT = BIT(4), /* struct ide_cmd was allocated using kmalloc() */ - IDE_TFLAG_DYN = (1 << 5), - IDE_TFLAG_FS = (1 << 6), - IDE_TFLAG_MULTI_PIO = (1 << 7), - IDE_TFLAG_SET_XFER = (1 << 8), + IDE_TFLAG_DYN = BIT(5), + IDE_TFLAG_FS = BIT(6), + IDE_TFLAG_MULTI_PIO = BIT(7), + IDE_TFLAG_SET_XFER = BIT(8), }; enum { - IDE_FTFLAG_FLAGGED = (1 << 0), - IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), - IDE_FTFLAG_OUT_DATA = (1 << 2), - IDE_FTFLAG_IN_DATA = (1 << 3), + IDE_FTFLAG_FLAGGED = BIT(0), + IDE_FTFLAG_SET_IN_FLAGS = BIT(1), + IDE_FTFLAG_OUT_DATA = BIT(2), + IDE_FTFLAG_IN_DATA = BIT(3), }; struct ide_taskfile { @@ -357,13 +357,13 @@ struct ide_cmd { /* ATAPI packet command flags */ enum { /* set when an error is considered normal - no retry (ide-tape) */ - PC_FLAG_ABORT = (1 << 0), - PC_FLAG_SUPPRESS_ERROR = (1 << 1), - PC_FLAG_WAIT_FOR_DSC = (1 << 2), - PC_FLAG_DMA_OK = (1 << 3), - PC_FLAG_DMA_IN_PROGRESS = (1 << 4), - PC_FLAG_DMA_ERROR = (1 << 5), - PC_FLAG_WRITING = (1 << 6), + PC_FLAG_ABORT = BIT(0), + PC_FLAG_SUPPRESS_ERROR = BIT(1), + PC_FLAG_WAIT_FOR_DSC = BIT(2), + PC_FLAG_DMA_OK = BIT(3), + PC_FLAG_DMA_IN_PROGRESS = BIT(4), + PC_FLAG_DMA_ERROR = BIT(5), + PC_FLAG_WRITING = BIT(6), }; #define ATAPI_WAIT_PC (60 * HZ) @@ -417,111 +417,111 @@ struct ide_disk_ops { /* ATAPI device flags */ enum { - IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + IDE_AFLAG_DRQ_INTERRUPT = BIT(0), /* ide-cd */ /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = (1 << 1), + IDE_AFLAG_NO_EJECT = BIT(1), /* Drive is a pre ATAPI 1.2 drive. */ - IDE_AFLAG_PRE_ATAPI12 = (1 << 2), + IDE_AFLAG_PRE_ATAPI12 = BIT(2), /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), + IDE_AFLAG_TOCADDR_AS_BCD = BIT(3), /* TOC track numbers are in BCD. 
*/ - IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), + IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4), /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = (1 << 6), + IDE_AFLAG_TOC_VALID = BIT(6), /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = (1 << 7), + IDE_AFLAG_DOOR_LOCKED = BIT(7), /* SET_CD_SPEED command is unsupported. */ - IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), - IDE_AFLAG_VERTOS_300_SSD = (1 << 9), - IDE_AFLAG_VERTOS_600_ESD = (1 << 10), - IDE_AFLAG_SANYO_3CD = (1 << 11), - IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), - IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), - IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), + IDE_AFLAG_NO_SPEED_SELECT = BIT(8), + IDE_AFLAG_VERTOS_300_SSD = BIT(9), + IDE_AFLAG_VERTOS_600_ESD = BIT(10), + IDE_AFLAG_SANYO_3CD = BIT(11), + IDE_AFLAG_FULL_CAPS_PAGE = BIT(12), + IDE_AFLAG_PLAY_AUDIO_OK = BIT(13), + IDE_AFLAG_LE_SPEED_FIELDS = BIT(14), /* ide-floppy */ /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = (1 << 15), + IDE_AFLAG_CLIK_DRIVE = BIT(15), /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = (1 << 16), + IDE_AFLAG_ZIP_DRIVE = BIT(16), /* Supports format progress report */ - IDE_AFLAG_SRFP = (1 << 17), + IDE_AFLAG_SRFP = BIT(17), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = (1 << 18), + IDE_AFLAG_IGNORE_DSC = BIT(18), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = (1 << 19), + IDE_AFLAG_ADDRESS_VALID = BIT(19), /* Device already opened */ - IDE_AFLAG_BUSY = (1 << 20), + IDE_AFLAG_BUSY = BIT(20), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = (1 << 21), + IDE_AFLAG_DETECT_BS = BIT(21), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = (1 << 22), + IDE_AFLAG_FILEMARK = BIT(22), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), + IDE_AFLAG_MEDIUM_PRESENT = BIT(23), - IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), + IDE_AFLAG_NO_AUTOCLOSE = BIT(24), }; /* device flags */ enum { /* restore settings after device reset */ - IDE_DFLAG_KEEP_SETTINGS = (1 << 0), + IDE_DFLAG_KEEP_SETTINGS = BIT(0), /* device is using DMA for read/write */ - IDE_DFLAG_USING_DMA = (1 << 1), + IDE_DFLAG_USING_DMA = BIT(1), /* okay to unmask other IRQs */ - IDE_DFLAG_UNMASK = (1 << 2), + IDE_DFLAG_UNMASK = BIT(2), /* don't attempt flushes */ - IDE_DFLAG_NOFLUSH = (1 << 3), + IDE_DFLAG_NOFLUSH = BIT(3), /* DSC overlap */ - IDE_DFLAG_DSC_OVERLAP = (1 << 4), + IDE_DFLAG_DSC_OVERLAP = BIT(4), /* give potential excess bandwidth */ - IDE_DFLAG_NICE1 = (1 << 5), + IDE_DFLAG_NICE1 = BIT(5), /* device is physically present */ - IDE_DFLAG_PRESENT = (1 << 6), + IDE_DFLAG_PRESENT = BIT(6), /* disable Host Protected Area */ - IDE_DFLAG_NOHPA = (1 << 7), + IDE_DFLAG_NOHPA = BIT(7), /* id read from device (synthetic if not set) */ - IDE_DFLAG_ID_READ = (1 << 8), - IDE_DFLAG_NOPROBE = (1 << 9), + IDE_DFLAG_ID_READ = BIT(8), + IDE_DFLAG_NOPROBE = BIT(9), /* need to do check_media_change() */ - IDE_DFLAG_REMOVABLE = (1 << 10), + IDE_DFLAG_REMOVABLE = BIT(10), /* needed for removable devices */ - IDE_DFLAG_ATTACH = (1 << 11), - IDE_DFLAG_FORCED_GEOM = (1 << 12), + IDE_DFLAG_ATTACH = BIT(11), + IDE_DFLAG_FORCED_GEOM = BIT(12), /* disallow setting unmask bit */ - IDE_DFLAG_NO_UNMASK = (1 << 13), + IDE_DFLAG_NO_UNMASK = BIT(13), /* disallow enabling 32-bit I/O */ - IDE_DFLAG_NO_IO_32BIT = (1 << 14), + IDE_DFLAG_NO_IO_32BIT = BIT(14), /* for removable only: door lock/unlock works */ - IDE_DFLAG_DOORLOCKING = (1 << 15), + IDE_DFLAG_DOORLOCKING = BIT(15), /* disallow DMA */ - 
IDE_DFLAG_NODMA = (1 << 16), + IDE_DFLAG_NODMA = BIT(16), /* powermanagement told us not to do anything, so sleep nicely */ - IDE_DFLAG_BLOCKED = (1 << 17), + IDE_DFLAG_BLOCKED = BIT(17), /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = (1 << 18), - IDE_DFLAG_POST_RESET = (1 << 19), - IDE_DFLAG_UDMA33_WARNED = (1 << 20), - IDE_DFLAG_LBA48 = (1 << 21), + IDE_DFLAG_SLEEPING = BIT(18), + IDE_DFLAG_POST_RESET = BIT(19), + IDE_DFLAG_UDMA33_WARNED = BIT(20), + IDE_DFLAG_LBA48 = BIT(21), /* status of write cache */ - IDE_DFLAG_WCACHE = (1 << 22), + IDE_DFLAG_WCACHE = BIT(22), /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = (1 << 23), + IDE_DFLAG_NOWERR = BIT(23), /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), - IDE_DFLAG_LBA = (1 << 25), + IDE_DFLAG_DMA_PIO_RETRY = BIT(24), + IDE_DFLAG_LBA = BIT(25), /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = (1 << 26), + IDE_DFLAG_NO_UNLOAD = BIT(26), /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = (1 << 27), - IDE_DFLAG_MEDIA_CHANGED = (1 << 28), + IDE_DFLAG_PARKED = BIT(27), + IDE_DFLAG_MEDIA_CHANGED = BIT(28), /* write protect */ - IDE_DFLAG_WP = (1 << 29), - IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), - IDE_DFLAG_NIEN_QUIRK = (1 << 31), + IDE_DFLAG_WP = BIT(29), + IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30), + IDE_DFLAG_NIEN_QUIRK = BIT(31), }; struct ide_drive_s { @@ -709,7 +709,7 @@ struct ide_dma_ops { }; enum { - IDE_PFLAG_PROBING = (1 << 0), + IDE_PFLAG_PROBING = BIT(0), }; struct ide_host; @@ -862,7 +862,7 @@ extern struct mutex ide_setting_mtx; * configurable drive settings */ -#define DS_SYNC (1 << 0) +#define DS_SYNC BIT(0) struct ide_devset { int (*get)(ide_drive_t *); @@ -1000,15 +1000,15 @@ static inline void ide_proc_unregister_driver(ide_drive_t *drive, enum { /* enter/exit functions */ - IDE_DBG_FUNC = (1 << 0), + IDE_DBG_FUNC = BIT(0), /* sense key/asc handling */ - IDE_DBG_SENSE = (1 << 1), + IDE_DBG_SENSE = BIT(1), /* packet commands handling */ - IDE_DBG_PC = (1 << 2), + IDE_DBG_PC = BIT(2), /* request handling */ - IDE_DBG_RQ = (1 << 3), + IDE_DBG_RQ = BIT(3), /* driver probing/setup */ - IDE_DBG_PROBE = (1 << 4), + IDE_DBG_PROBE = BIT(4), }; /* DRV_NAME has to be defined in the driver before using the macro below */ @@ -1171,10 +1171,10 @@ ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, * the tail of our block device request queue and wait for their completion. */ enum { - REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ - REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ - REQ_IDETAPE_READ = (1 << 2), - REQ_IDETAPE_WRITE = (1 << 3), + REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */ + REQ_IDETAPE_READ = BIT(2), + REQ_IDETAPE_WRITE = BIT(3), }; int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, @@ -1264,71 +1264,71 @@ struct ide_pci_enablebit { enum { /* Uses ISA control ports not PCI ones. 
*/ - IDE_HFLAG_ISA_PORTS = (1 << 0), + IDE_HFLAG_ISA_PORTS = BIT(0), /* single port device */ - IDE_HFLAG_SINGLE = (1 << 1), + IDE_HFLAG_SINGLE = BIT(1), /* don't use legacy PIO blacklist */ - IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), + IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2), /* set for the second port of QD65xx */ - IDE_HFLAG_QD_2ND_PORT = (1 << 3), + IDE_HFLAG_QD_2ND_PORT = BIT(3), /* use PIO8/9 for prefetch off/on */ - IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), + IDE_HFLAG_ABUSE_PREFETCH = BIT(4), /* use PIO6/7 for fast-devsel off/on */ - IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5), + IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5), /* use 100-102 and 200-202 PIO values to set DMA modes */ - IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6), + IDE_HFLAG_ABUSE_DMA_MODES = BIT(6), /* * keep DMA setting when programming PIO mode, may be used only * for hosts which have separate PIO and DMA timings (ie. PMAC) */ - IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7), + IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7), /* program host for the transfer mode after programming device */ - IDE_HFLAG_POST_SET_MODE = (1 << 8), + IDE_HFLAG_POST_SET_MODE = BIT(8), /* don't program host/device for the transfer mode ("smart" hosts) */ - IDE_HFLAG_NO_SET_MODE = (1 << 9), + IDE_HFLAG_NO_SET_MODE = BIT(9), /* trust BIOS for programming chipset/device for DMA */ - IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10), + IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10), /* host is CS5510/CS5520 */ - IDE_HFLAG_CS5520 = (1 << 11), + IDE_HFLAG_CS5520 = BIT(11), /* ATAPI DMA is unsupported */ - IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), + IDE_HFLAG_NO_ATAPI_DMA = BIT(12), /* set if host is a "non-bootable" controller */ - IDE_HFLAG_NON_BOOTABLE = (1 << 13), + IDE_HFLAG_NON_BOOTABLE = BIT(13), /* host doesn't support DMA */ - IDE_HFLAG_NO_DMA = (1 << 14), + IDE_HFLAG_NO_DMA = BIT(14), /* check if host is PCI IDE device before allowing DMA */ - IDE_HFLAG_NO_AUTODMA = (1 << 15), + IDE_HFLAG_NO_AUTODMA = BIT(15), /* host uses MMIO */ - IDE_HFLAG_MMIO = (1 << 16), + IDE_HFLAG_MMIO = BIT(16), /* no LBA48 */ - IDE_HFLAG_NO_LBA48 = (1 << 17), + IDE_HFLAG_NO_LBA48 = BIT(17), /* no LBA48 DMA */ - IDE_HFLAG_NO_LBA48_DMA = (1 << 18), + IDE_HFLAG_NO_LBA48_DMA = BIT(18), /* data FIFO is cleared by an error */ - IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), + IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19), /* serialize ports */ - IDE_HFLAG_SERIALIZE = (1 << 20), + IDE_HFLAG_SERIALIZE = BIT(20), /* host is DTC2278 */ - IDE_HFLAG_DTC2278 = (1 << 21), + IDE_HFLAG_DTC2278 = BIT(21), /* 4 devices on a single set of I/O ports */ - IDE_HFLAG_4DRIVES = (1 << 22), + IDE_HFLAG_4DRIVES = BIT(22), /* host is TRM290 */ - IDE_HFLAG_TRM290 = (1 << 23), + IDE_HFLAG_TRM290 = BIT(23), /* use 32-bit I/O ops */ - IDE_HFLAG_IO_32BIT = (1 << 24), + IDE_HFLAG_IO_32BIT = BIT(24), /* unmask IRQs */ - IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26), + IDE_HFLAG_UNMASK_IRQS = BIT(25), + IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26), /* serialize ports if DMA is possible (for sl82c105) */ - IDE_HFLAG_SERIALIZE_DMA = (1 << 27), + IDE_HFLAG_SERIALIZE_DMA = BIT(27), /* force host out of "simplex" mode */ - IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), + IDE_HFLAG_CLEAR_SIMPLEX = BIT(28), /* DSC overlap is unsupported */ - IDE_HFLAG_NO_DSC = (1 << 29), + IDE_HFLAG_NO_DSC = BIT(29), /* never use 32-bit I/O ops */ - IDE_HFLAG_NO_IO_32BIT = (1 << 30), + IDE_HFLAG_NO_IO_32BIT = BIT(30), /* never unmask IRQs */ - IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), + IDE_HFLAG_NO_UNMASK_IRQS = BIT(31), }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1536,16 +1536,16 @@ struct 
ide_timing { }; enum { - IDE_TIMING_SETUP = (1 << 0), - IDE_TIMING_ACT8B = (1 << 1), - IDE_TIMING_REC8B = (1 << 2), - IDE_TIMING_CYC8B = (1 << 3), + IDE_TIMING_SETUP = BIT(0), + IDE_TIMING_ACT8B = BIT(1), + IDE_TIMING_REC8B = BIT(2), + IDE_TIMING_CYC8B = BIT(3), IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | IDE_TIMING_CYC8B, - IDE_TIMING_ACTIVE = (1 << 4), - IDE_TIMING_RECOVER = (1 << 5), - IDE_TIMING_CYCLE = (1 << 6), - IDE_TIMING_UDMA = (1 << 7), + IDE_TIMING_ACTIVE = BIT(4), + IDE_TIMING_RECOVER = BIT(5), + IDE_TIMING_CYCLE = BIT(6), + IDE_TIMING_UDMA = BIT(7), IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | IDE_TIMING_CYCLE | IDE_TIMING_UDMA, diff --git a/include/linux/idr.h b/include/linux/idr.h index ee7abae143d3..4ec8986e5dfb 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -191,14 +191,17 @@ static inline void idr_preload_end(void) * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. * @idr: IDR handle. * @entry: The type * to use as cursor. + * @tmp: A temporary placeholder for ID. * @id: Entry ID. * * @entry and @id do not need to be initialized before the loop, and * after normal termination @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry_ul(idr, entry, id) \ - for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) +#define idr_for_each_entry_ul(idr, entry, tmp, id) \ + for (tmp = 0, id = 0; \ + tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + tmp = id, ++id) /** * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type @@ -213,6 +216,20 @@ static inline void idr_preload_end(void) entry; \ ++id, (entry) = idr_get_next((idr), &(id))) +/** + * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type + * @idr: IDR handle. + * @entry: The type * to use as a cursor. + * @tmp: A temporary placeholder for ID. + * @id: Entry ID. + * + * Continue to iterate over entries, continuing after the current position. + */ +#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ + for (tmp = id; \ + tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + tmp = id, ++id) + /* * IDA - ID Allocator, use when translation from id to pointer isn't necessary. */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 42690007d612..8511fadc0935 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2609,6 +2609,7 @@ enum ieee80211_key_len { #define FILS_ERP_MAX_RRK_LEN 64 #define PMK_MAX_LEN 64 +#define SAE_PASSWORD_MAX_LEN 128 /* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ enum ieee80211_pub_actioncode { @@ -2709,6 +2710,13 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) #define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) +/* + * When set, indicates that the AP is able to tolerate 26-tone RU UL + * OFDMA transmissions using HE TB PPDU from OBSS (not falsely classify the + * 26-tone RU UL OFDMA transmissions as radar pulses). 
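
The idr.h hunk above adds a scratch cursor to idr_for_each_entry_ul() so the walk still terminates when the final ID is ULONG_MAX and the ++id increment wraps. A minimal caller sketch follows; walk_entries() and its output are illustrative, only the four-argument macro signature comes from the patch.

#include <linux/idr.h>
#include <linux/printk.h>

static void walk_entries(struct idr *idr)
{
	void *entry;
	unsigned long id, tmp;	/* tmp is the new scratch cursor */

	/* visits every entry; tmp guards against ++id wrapping past ULONG_MAX */
	idr_for_each_entry_ul(idr, entry, tmp, id)
		pr_info("id %lu -> %p\n", id, entry);
}
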
+ */ +#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) + /* Defines support for enhanced multi-bssid advertisement*/ #define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index f3fab5d0ea97..9e57c4411734 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -88,6 +88,8 @@ static inline bool br_multicast_router(const struct net_device *dev) #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) bool br_vlan_enabled(const struct net_device *dev); int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid); +int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); +int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); #else @@ -101,6 +103,16 @@ static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) return -EINVAL; } +static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto) +{ + return -EINVAL; +} + +static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) +{ + return -EINVAL; +} + static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo) { diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 8b728750a625..69e813bcb947 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); extern void unregister_pppox_proto(int proto_num); extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t) /* PPPoX socket states */ enum { diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h new file mode 100644 index 000000000000..9661416a9bb4 --- /dev/null +++ b/include/linux/if_rmnet.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. 
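
A quick sketch of the two bridge VLAN helpers added in the if_bridge.h hunk above. query_bridge_vlan() is hypothetical, and the locking shown (RCU around br_vlan_get_pvid_rcu(), nothing extra for br_vlan_get_proto()) is an assumption rather than something this patch documents; both helpers return 0 on success.

#include <linux/if_bridge.h>
#include <linux/netdevice.h>

static void query_bridge_vlan(struct net_device *br_dev)
{
	u16 pvid, proto;

	rcu_read_lock();
	if (!br_vlan_get_pvid_rcu(br_dev, &pvid))
		pr_info("%s: pvid %u\n", br_dev->name, pvid);
	rcu_read_unlock();

	if (!br_vlan_get_proto(br_dev, &proto))
		pr_info("%s: vlan proto 0x%04x\n", br_dev->name, proto);
}
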
+ */ + +#ifndef _LINUX_IF_RMNET_H_ +#define _LINUX_IF_RMNET_H_ + +struct rmnet_map_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 pad_len:6; + u8 reserved_bit:1; + u8 cd_bit:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 cd_bit:1; + u8 reserved_bit:1; + u8 pad_len:6; +#else +#error "Please fix <asm/byteorder.h>" +#endif + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +struct rmnet_map_dl_csum_trailer { + u8 reserved1; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 valid:1; + u8 reserved2:7; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 reserved2:7; + u8 valid:1; +#else +#error "Please fix <asm/byteorder.h>" +#endif + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 csum_insert_offset:14; + u16 udp_ind:1; + u16 csum_enabled:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u16 csum_enabled:1; + u16 udp_ind:1; + u16 csum_insert_offset:14; +#else +#error "Please fix <asm/byteorder.h>" +#endif +} __aligned(1); + +#endif /* !(_LINUX_IF_RMNET_H_) */ diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 8e66866c11be..915a187cfabd 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -62,7 +62,6 @@ struct tap_dev { struct tap_queue { struct sock sk; struct socket sock; - struct socket_wq wq; int vnet_hdr_sz; struct tap_dev __rcu *tap; struct file *file; diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9cbbd1baaf85..463047d0190b 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -60,8 +60,8 @@ struct ip_mc_socklist { struct ip_sf_list { struct ip_sf_list *sf_next; - __be32 sf_inaddr; unsigned long sf_count[2]; /* include/exclude counts */ + __be32 sf_inaddr; unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans. 
left to send */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 00036d2f57c3..a20ad398d260 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -23,6 +23,7 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); +extern void ima_kexec_cmdline(const void *buf, int size); #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); @@ -89,6 +90,7 @@ static inline void ima_post_path_mknod(struct dentry *dentry) return; } +static inline void ima_kexec_cmdline(const void *buf, int size) {} #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC diff --git a/include/linux/in.h b/include/linux/in.h index 4d2fedfb753a..1873ef642605 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -63,7 +63,7 @@ static inline bool ipv4_is_all_snoopers(__be32 addr) static inline bool ipv4_is_zeronet(__be32 addr) { - return (addr & htonl(0xff000000)) == htonl(0x00000000); + return (addr == 0); } /* Special-Use IPv4 Addresses (RFC3330) */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 367dc2a0f84a..3515ca64e638 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -26,7 +26,7 @@ struct in_device { struct net_device *dev; refcount_t refcnt; int dead; - struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; @@ -136,7 +136,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) struct in_ifaddr { struct hlist_node hash; - struct in_ifaddr *ifa_next; + struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; @@ -186,7 +186,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); -static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } @@ -206,14 +206,13 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) return false; } -#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) +#define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ + for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ + ifa = rtnl_dereference(ifa->ifa_next)) -#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) - - -#define endfor_ifa(in_dev) } +#define in_dev_for_each_ifa_rcu(ifa, in_dev) \ + for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ + ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { diff --git a/include/linux/init.h b/include/linux/init.h index 5255069f5a9f..212fc9e2f691 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -137,6 +137,8 @@ extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; /* Used for contructor calls. 
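
The inetdevice.h hunk above replaces the open-coded for_ifa()/for_primary_ifa()/endfor_ifa() walkers with explicit RTNL and RCU iterators. A minimal sketch of the RCU variant, assuming a hypothetical print_addrs() caller:

#include <linux/inetdevice.h>

static void print_addrs(struct net_device *dev)
{
	struct in_device *in_dev;
	const struct in_ifaddr *ifa;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		in_dev_for_each_ifa_rcu(ifa, in_dev)
			pr_info("%s: %pI4/%pI4\n", dev->name,
				&ifa->ifa_local, &ifa->ifa_mask);
	}
	rcu_read_unlock();
}
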
*/ typedef void (*ctor_fn_t)(void); +struct file_system_type; + /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; @@ -146,7 +148,8 @@ extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); -int __init init_rootfs(void); +void __init init_rootfs(void); +extern struct file_system_type rootfs_fs_type; #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) extern bool rodata_enabled; diff --git a/include/linux/input.h b/include/linux/input.h index 510e78558c10..94f277cd806a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -21,6 +21,8 @@ #include <linux/timer.h> #include <linux/mod_devicetable.h> +struct input_dev_poller; + /** * struct input_value - input value representation * @type: type of value (EV_KEY, EV_ABS, etc) @@ -33,6 +35,13 @@ struct input_value { __s32 value; }; +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO, + INPUT_CLK_BOOT, + INPUT_CLK_MAX +}; + /** * struct input_dev - represents an input device * @name: name of the device @@ -64,6 +73,8 @@ struct input_value { * not sleep * @ff: force feedback structure associated with the device if device * supports force feedback effects + * @poller: poller structure associated with the device if device is + * set up to use polling mode * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat @@ -114,6 +125,8 @@ struct input_value { * @vals: array of values queued in the current frame * @devres_managed: indicates that devices is managed with devres framework * and needs not be explicitly unregistered or freed. + * @timestamp: storage for a timestamp set by input_set_timestamp called + * by a driver */ struct input_dev { const char *name; @@ -147,6 +160,8 @@ struct input_dev { struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; struct timer_list timer; @@ -184,6 +199,8 @@ struct input_dev { struct input_value *vals; bool devres_managed; + + ktime_t timestamp[INPUT_CLK_MAX]; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -361,6 +378,12 @@ void input_unregister_device(struct input_dev *); void input_reset_device(struct input_dev *); +int input_setup_polling(struct input_dev *dev, + void (*poll_fn)(struct input_dev *dev)); +void input_set_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval); + int __must_check input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); @@ -382,6 +405,9 @@ void input_close_device(struct input_handle *); int input_flush_device(struct input_handle *handle, struct file *file); +void input_set_timestamp(struct input_dev *dev, ktime_t timestamp); +ktime_t *input_get_timestamp(struct input_dev *dev); + void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h deleted file mode 100644 index 7e5b7e978e8a..000000000000 --- a/include/linux/input/bu21013.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Naveen Kumar G 
<naveen.gaddipati@stericsson.com> for ST-Ericsson - */ - -#ifndef _BU21013_H -#define _BU21013_H - -/** - * struct bu21013_platform_device - Handle the platform data - * @touch_x_max: touch x max - * @touch_y_max: touch y max - * @cs_pin: chip select pin - * @touch_pin: touch gpio pin - * @ext_clk: external clock flag - * @x_flip: x flip flag - * @y_flip: y flip flag - * @wakeup: wakeup flag - * - * This is used to handle the platform data - */ -struct bu21013_platform_device { - int touch_x_max; - int touch_y_max; - unsigned int cs_pin; - unsigned int touch_pin; - bool ext_clk; - bool x_flip; - bool y_flip; - bool wakeup; -}; - -#endif diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6a8dd4af0147..f2ae8a006ff8 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -435,6 +435,12 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) +extern int intel_iommu_sm; + +#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) +#define pasid_supported(iommu) (sm_supported(iommu) && \ + ecap_pasid((iommu)->ecap)) + struct pasid_entry; struct pasid_state_entry; struct page_req_dsc; @@ -642,7 +648,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); -struct dmar_domain *get_valid_domain_for_dev(struct device *dev); void *alloc_pgtable_page(int node); void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 54ffcc6a322e..94f047a8a845 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -49,7 +49,7 @@ struct svm_dev_ops { /** * intel_svm_bind_mm() - Bind the current process to a PASID - * @dev: Device to be granted acccess + * @dev: Device to be granted access * @pasid: Address for allocated PASID * @flags: Flags. Later for requesting supervisor mode, etc. * @ops: Callbacks to device driver diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h new file mode 100644 index 000000000000..efb3ce892c20 --- /dev/null +++ b/include/linux/intel_rapl.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Data types and headers for RAPL support + * + * Copyright (C) 2019 Intel Corporation. 
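
As a small illustration of the sm_supported()/pasid_supported() helpers added to intel-iommu.h above, a driver-internal gate might look like the sketch below; my_setup() is a hypothetical name, not a function in this patch.

#include <linux/intel-iommu.h>

static void my_setup(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;		/* legacy mode, or no PASID capability */

	/* scalable-mode / PASID table setup would go here */
}
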
+ * + * Author: Zhang Rui <rui.zhang@intel.com> + */ + +#ifndef __INTEL_RAPL_H__ +#define __INTEL_RAPL_H__ + +#include <linux/types.h> +#include <linux/powercap.h> +#include <linux/cpuhotplug.h> + +enum rapl_domain_type { + RAPL_DOMAIN_PACKAGE, /* entire package/socket */ + RAPL_DOMAIN_PP0, /* core power plane */ + RAPL_DOMAIN_PP1, /* graphics uncore */ + RAPL_DOMAIN_DRAM, /* DRAM control_type */ + RAPL_DOMAIN_PLATFORM, /* PSys control_type */ + RAPL_DOMAIN_MAX, +}; + +enum rapl_domain_reg_id { + RAPL_DOMAIN_REG_LIMIT, + RAPL_DOMAIN_REG_STATUS, + RAPL_DOMAIN_REG_PERF, + RAPL_DOMAIN_REG_POLICY, + RAPL_DOMAIN_REG_INFO, + RAPL_DOMAIN_REG_MAX, +}; + +struct rapl_package; + +enum rapl_primitives { + ENERGY_COUNTER, + POWER_LIMIT1, + POWER_LIMIT2, + FW_LOCK, + + PL1_ENABLE, /* power limit 1, aka long term */ + PL1_CLAMP, /* allow frequency to go below OS request */ + PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ + PL2_CLAMP, + + TIME_WINDOW1, /* long term */ + TIME_WINDOW2, /* short term */ + THERMAL_SPEC_POWER, + MAX_POWER, + + MIN_POWER, + MAX_TIME_WINDOW, + THROTTLED_TIME, + PRIORITY_LEVEL, + + /* below are not raw primitive data */ + AVERAGE_POWER, + NR_RAPL_PRIMITIVES, +}; + +struct rapl_domain_data { + u64 primitives[NR_RAPL_PRIMITIVES]; + unsigned long timestamp; +}; + +#define NR_POWER_LIMITS (2) +struct rapl_power_limit { + struct powercap_zone_constraint *constraint; + int prim_id; /* primitive ID used to enable */ + struct rapl_domain *domain; + const char *name; + u64 last_power_limit; +}; + +struct rapl_package; + +struct rapl_domain { + const char *name; + enum rapl_domain_type id; + u64 regs[RAPL_DOMAIN_REG_MAX]; + struct powercap_zone power_zone; + struct rapl_domain_data rdd; + struct rapl_power_limit rpl[NR_POWER_LIMITS]; + u64 attr_map; /* track capabilities */ + unsigned int state; + unsigned int domain_energy_unit; + struct rapl_package *rp; +}; + +struct reg_action { + u64 reg; + u64 mask; + u64 value; + int err; +}; + +/** + * struct rapl_if_priv: private data for different RAPL interfaces + * @control_type: Each RAPL interface must have its own powercap + * control type. + * @platform_rapl_domain: Optional. Some RAPL interface may have platform + * level RAPL control. + * @pcap_rapl_online: CPU hotplug state for each RAPL interface. + * @reg_unit: Register for getting energy/power/time unit. + * @regs: Register sets for different RAPL Domains. + * @limits: Number of power limits supported by each domain. + * @read_raw: Callback for reading RAPL interface specific + * registers. + * @write_raw: Callback for writing RAPL interface specific + * registers. 
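
The struct behind this kerneldoc follows just below. As a sketch of how a RAPL interface driver might populate it and register a package, assuming hypothetical my_* names and an MSR-style backend (the 0x606 unit register is an assumption, not part of this header):

#include <linux/intel_rapl.h>

static int my_read_raw(int cpu, struct reg_action *ra)
{
	ra->value = 0;		/* e.g. an MSR or MMIO read of ra->reg on @cpu */
	ra->value &= ra->mask;
	return 0;
}

static int my_write_raw(int cpu, struct reg_action *ra)
{
	return 0;		/* read-modify-write of ra->reg limited to ra->mask */
}

static struct rapl_if_priv my_rapl_priv = {
	.reg_unit  = 0x606,	/* assumed: MSR_RAPL_POWER_UNIT */
	.read_raw  = my_read_raw,
	.write_raw = my_write_raw,
	/* .control_type, .regs[][] and .limits[] must be filled in before use */
};

static void my_package_online(int cpu)
{
	if (!rapl_find_package_domain(cpu, &my_rapl_priv))
		rapl_add_package(cpu, &my_rapl_priv);	/* error handling elided */
}
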
+ */ +struct rapl_if_priv { + struct powercap_control_type *control_type; + struct rapl_domain *platform_rapl_domain; + enum cpuhp_state pcap_rapl_online; + u64 reg_unit; + u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; + int limits[RAPL_DOMAIN_MAX]; + int (*read_raw)(int cpu, struct reg_action *ra); + int (*write_raw)(int cpu, struct reg_action *ra); +}; + +/* maximum rapl package domain name: package-%d-die-%d */ +#define PACKAGE_DOMAIN_NAME_LENGTH 30 + +struct rapl_package { + unsigned int id; /* logical die id, equals physical 1-die systems */ + unsigned int nr_domains; + unsigned long domain_map; /* bit map of active domains */ + unsigned int power_unit; + unsigned int energy_unit; + unsigned int time_unit; + struct rapl_domain *domains; /* array of domains, sized at runtime */ + struct powercap_zone *power_zone; /* keep track of parent zone */ + unsigned long power_limit_irq; /* keep track of package power limit + * notify interrupt enable status. + */ + struct list_head plist; + int lead_cpu; /* one active cpu per package for access */ + /* Track active cpus */ + struct cpumask cpumask; + char name[PACKAGE_DOMAIN_NAME_LENGTH]; + struct rapl_if_priv *priv; +}; + +struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv); +struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv); +void rapl_remove_package(struct rapl_package *rp); + +int rapl_add_platform_domain(struct rapl_if_priv *priv); +void rapl_remove_platform_domain(struct rapl_if_priv *priv); + +#endif /* __INTEL_RAPL_H__ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c7eef32e7739..5b8328a99b2a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -52,7 +52,7 @@ * irq line disabled until the threaded handler has been run. * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee * that this interrupt will wake the system from a suspended - * state. See Documentation/power/suspend-and-interrupts.txt + * state. See Documentation/power/suspend-and-interrupts.rst * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 76969a564831..b5a450a3bb47 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -44,6 +44,8 @@ struct iommu_gather_ops { * tables. * @ias: Input address (iova) size, in bits. * @oas: Output address (paddr) size, in bits. + * @coherent_walk A flag to indicate whether or not page table walks made + * by the IOMMU are coherent with the CPU caches. * @tlb: TLB management callbacks for this set of tables. * @iommu_dev: The device representing the DMA configuration for the * page table walker. @@ -68,11 +70,6 @@ struct io_pgtable_cfg { * when the SoC is in "4GB mode" and they can only access the high * remap of DRAM (0x1_00000000 to 0x1_ffffffff). * - * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever - * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a - * software-emulated IOMMU), such that pagetable updates need not - * be treated as explicit DMA data. - * * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs * on unmap, for DMA domains using the flush queue mechanism for * delayed invalidation. 
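
With IO_PGTABLE_QUIRK_NO_DMA removed, coherency of page table walks is now expressed through the new coherent_walk field of struct io_pgtable_cfg, as the next hunk shows. A minimal allocation-path sketch using it; my_alloc_pgtable(), my_gather_ops and the chosen page sizes, address widths and format are assumptions, not part of the patch.

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/* Placeholder for the driver's real TLB maintenance callbacks. */
static const struct iommu_gather_ops my_gather_ops;

static struct io_pgtable_ops *my_alloc_pgtable(struct device *dev, bool coherent)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= coherent,	/* replaces setting IO_PGTABLE_QUIRK_NO_DMA */
		.tlb		= &my_gather_ops,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, NULL);
}
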
@@ -81,12 +78,12 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) - #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) - #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5) + #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; unsigned int oas; + bool coherent_walk; const struct iommu_gather_ops *tlb; struct device *iommu_dev; diff --git a/include/linux/io.h b/include/linux/io.h index 9876e5801a9d..accac822336a 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP void __init ioremap_huge_init(void); +int arch_ioremap_p4d_supported(void); int arch_ioremap_pud_supported(void); int arch_ioremap_pmd_supported(void); #else diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 2103b94cb1bf..bc499ceae392 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/types.h> #include <linux/mm_types.h> +#include <linux/blkdev.h> struct address_space; struct fiemap_extent_info; @@ -35,6 +36,7 @@ struct vm_fault; #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ #define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */ #define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */ +#define IOMAP_F_SIZE_CHANGED 0x08 /* file size has changed */ /* * Flags that only need to be reported for IOMAP_REPORT requests: @@ -68,6 +70,12 @@ struct iomap { const struct iomap_page_ops *page_ops; }; +static inline sector_t +iomap_sector(struct iomap *iomap, loff_t pos) +{ + return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; +} + /* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to @@ -115,6 +123,16 @@ struct iomap_ops { }; /* + * Main iomap iterator function. + */ +typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, + void *data, struct iomap *iomap); + +loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, + unsigned flags, const struct iomap_ops *ops, void *data, + iomap_actor_t actor); + +/* * Structure allocate for each page when block size < PAGE_SIZE to track * sub-page uptodate status and I/O completions. */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e552c3b63f6f..fdc355ccc570 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/of.h> +#include <uapi/linux/iommu.h> #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) @@ -29,6 +30,12 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) +/* + * Non-coherent masters on few Qualcomm SoCs can use this page protection flag + * to set correct cacheability attributes to use an outer level of cache - + * last level cache, aka system cache. 
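
Stepping back to the iomap.h hunk above, which exports iomap_apply() with its iomap_actor_t callback type and adds the iomap_sector() helper: a hedged sketch of a trivial reporting actor. report_actor() and report_mappings() are illustrative names, not kernel functions, and the iomap_ops instance is assumed to come from the filesystem.

#include <linux/iomap.h>

static loff_t report_actor(struct inode *inode, loff_t pos, loff_t len,
			   void *data, struct iomap *iomap)
{
	if (iomap->type != IOMAP_HOLE)
		pr_info("extent at byte %lld -> sector %llu\n", pos,
			(unsigned long long)iomap_sector(iomap, pos));

	return len;	/* report how much of the range was consumed */
}

static void report_mappings(struct inode *inode, loff_t pos, loff_t len,
			    const struct iomap_ops *ops)
{
	while (len > 0) {
		loff_t ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
					 ops, NULL, report_actor);

		if (ret <= 0)
			break;
		pos += ret;
		len -= ret;
	}
}
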
+ */ +#define IOMMU_QCOM_SYS_CACHE (1 << 6) struct iommu_ops; struct iommu_group; @@ -37,6 +44,7 @@ struct device; struct iommu_domain; struct notifier_block; struct iommu_sva; +struct iommu_fault_event; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -46,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, void *); +typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ @@ -123,6 +132,12 @@ enum iommu_attr { enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ IOMMU_RESV_DIRECT, + /* + * Memory regions which are advertised to be 1:1 but are + * commonly considered relaxable in some conditions, + * for instance in device assignment use case (USB, Graphics) + */ + IOMMU_RESV_DIRECT_RELAXABLE, /* Arbitrary "never map this or give it to a device" address ranges */ IOMMU_RESV_RESERVED, /* Hardware MSI region (untranslated) */ @@ -212,6 +227,7 @@ struct iommu_sva_ops { * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle + * @page_response: handle page request response * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { @@ -272,6 +288,10 @@ struct iommu_ops { void (*sva_unbind)(struct iommu_sva *handle); int (*sva_get_pasid)(struct iommu_sva *handle); + int (*page_response)(struct device *dev, + struct iommu_fault_event *evt, + struct iommu_page_response *msg); + unsigned long pgsize_bitmap; }; @@ -289,6 +309,48 @@ struct iommu_device { struct device *dev; }; +/** + * struct iommu_fault_event - Generic fault event + * + * Can represent recoverable faults such as a page requests or + * unrecoverable faults such as DMA or IRQ remapping faults. + * + * @fault: fault descriptor + * @list: pending fault event list, used for tracking responses + */ +struct iommu_fault_event { + struct iommu_fault fault; + struct list_head list; +}; + +/** + * struct iommu_fault_param - per-device IOMMU fault data + * @handler: Callback function to handle IOMMU faults at device level + * @data: handler private data + * @faults: holds the pending faults which needs response + * @lock: protect pending faults list + */ +struct iommu_fault_param { + iommu_dev_fault_handler_t handler; + void *data; + struct list_head faults; + struct mutex lock; +}; + +/** + * struct iommu_param - collection of per-device IOMMU data + * + * @fault_param: IOMMU detected device fault reporting data + * + * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 
+ * struct iommu_group *iommu_group; + * struct iommu_fwspec *iommu_fwspec; + */ +struct iommu_param { + struct mutex lock; + struct iommu_fault_param *fault_param; +}; + int iommu_device_register(struct iommu_device *iommu); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, @@ -350,6 +412,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); +extern int iommu_request_dma_domain_for_dev(struct device *dev); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type); @@ -378,6 +441,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb); +extern int iommu_register_device_fault_handler(struct device *dev, + iommu_dev_fault_handler_t handler, + void *data); + +extern int iommu_unregister_device_fault_handler(struct device *dev); + +extern int iommu_report_device_fault(struct device *dev, + struct iommu_fault_event *evt); +extern int iommu_page_response(struct device *dev, + struct iommu_page_response *msg); + extern int iommu_group_id(struct iommu_group *group); extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); @@ -492,6 +566,7 @@ struct iommu_ops {}; struct iommu_group {}; struct iommu_fwspec {}; struct iommu_device {}; +struct iommu_fault_param {}; static inline bool iommu_present(struct bus_type *bus) { @@ -614,6 +689,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev) return -ENODEV; } +static inline int iommu_request_dma_domain_for_dev(struct device *dev) +{ + return -ENODEV; +} + static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { @@ -685,6 +765,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group, return 0; } +static inline +int iommu_register_device_fault_handler(struct device *dev, + iommu_dev_fault_handler_t handler, + void *data) +{ + return -ENODEV; +} + +static inline int iommu_unregister_device_fault_handler(struct device *dev) +{ + return 0; +} + +static inline +int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) +{ + return -ENODEV; +} + +static inline int iommu_page_response(struct device *dev, + struct iommu_page_response *msg) +{ + return -ENODEV; +} + static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 3908353deec6..35e15dfd4155 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -21,7 +21,7 @@ * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either @@ -60,7 +60,7 @@ * @cond: Break condition (usually involving @val) * @delay_us: Time to udelay between reads in us (0 tight-loops). 
Should * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.txt). + * Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either diff --git a/include/linux/ioport.h b/include/linux/ioport.h index da0ebaec25f0..5b6a7121c9f0 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/types.h> +#include <linux/bits.h> /* * Resources are tree-like, allowing * nesting etc.. @@ -132,7 +133,15 @@ enum { IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, - IORES_DESC_DEVICE_PUBLIC_MEMORY = 7, + IORES_DESC_RESERVED = 7, +}; + +/* + * Flags controlling ioremap() behavior. + */ +enum { + IORES_MAP_SYSTEM_RAM = BIT(0), + IORES_MAP_ENCRYPTED = BIT(1), }; /* helpers to define resources */ @@ -286,6 +295,8 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) return (r1->start <= r2->end && r1->end >= r2->start); } +struct resource *devm_request_free_mem_region(struct device *dev, + struct resource *base, unsigned long size); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 781b96ac706f..a0637abffee8 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +bool has_iova_flush_queue(struct iova_domain *iovad); int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); @@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad, { } +static inline bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return false; +} + static inline int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index 626283858563..b9850f5f1906 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -36,4 +36,9 @@ struct gic_kvm_info { const struct gic_kvm_info *gic_get_kvm_info(void); +struct irq_domain; +struct fwnode_handle; +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); + #endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 316087da1d09..5686711b0f40 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -157,9 +157,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); */ void gic_init(void __iomem *dist , void __iomem *cpu); -int gicv2m_init(struct fwnode_handle *parent_handle, - struct irq_domain *parent); - void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); diff --git a/include/linux/isdn.h b/include/linux/isdn.h deleted file mode 100644 index df97c8444f5d..000000000000 --- a/include/linux/isdn.h +++ /dev/null @@ -1,473 +0,0 @@ -/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ - * - * 
Main header for the Linux ISDN subsystem (linklevel). - * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef __ISDN_H__ -#define __ISDN_H__ - - -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/major.h> -#include <asm/io.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/slab.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_reg.h> -#include <linux/fcntl.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/ip.h> -#include <linux/in.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/tcp.h> -#include <linux/mutex.h> -#include <uapi/linux/isdn.h> - -#define ISDN_TTY_MAJOR 43 -#define ISDN_TTYAUX_MAJOR 44 -#define ISDN_MAJOR 45 - -/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for - * physical Channel-Mapping, so they MUST NOT be changed without changing - * the correspondent code in isdn.c - */ - -#define ISDN_MINOR_B 0 -#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1) -#define ISDN_MINOR_CTRL 64 -#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1)) -#define ISDN_MINOR_PPP 128 -#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1)) -#define ISDN_MINOR_STATUS 255 - -#ifdef CONFIG_ISDN_PPP - -#ifdef CONFIG_ISDN_PPP_VJ -# include <net/slhc_vj.h> -#endif - -#include <linux/ppp_defs.h> -#include <linux/ppp-ioctl.h> - -#include <linux/isdn_ppp.h> -#endif - -#ifdef CONFIG_ISDN_X25 -# include <linux/concap.h> -#endif - -#include <linux/isdnif.h> - -#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */ - -/* Until now unused */ -#define ISDN_SERVICE_VOICE 1 -#define ISDN_SERVICE_AB 1<<1 -#define ISDN_SERVICE_X21 1<<2 -#define ISDN_SERVICE_G4 1<<3 -#define ISDN_SERVICE_BTX 1<<4 -#define ISDN_SERVICE_DFUE 1<<5 -#define ISDN_SERVICE_X25 1<<6 -#define ISDN_SERVICE_TTX 1<<7 -#define ISDN_SERVICE_MIXED 1<<8 -#define ISDN_SERVICE_FW 1<<9 -#define ISDN_SERVICE_GTEL 1<<10 -#define ISDN_SERVICE_BTXN 1<<11 -#define ISDN_SERVICE_BTEL 1<<12 - -/* Macros checking plain usage */ -#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE) -#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW) -#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) -#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) -#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET) -#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX) -#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING) -#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \ - ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) ) - -/* Timer-delays and scheduling-flags */ -#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */ -#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */ -#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */ -#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */ -#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */ -#define ISDN_TIMER_MODEMREAD 1 -#define ISDN_TIMER_MODEMPLUS 2 -#define ISDN_TIMER_MODEMRING 4 -#define ISDN_TIMER_MODEMXMIT 8 -#define 
ISDN_TIMER_NETDIAL 16 -#define ISDN_TIMER_NETHANGUP 32 -#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */ -#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \ - ISDN_TIMER_MODEMXMIT) -#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \ - ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER) - -/* Timeout-Values for isdn_net_dial() */ -#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) -#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) -#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) - -/* GLOBAL_FLAGS */ -#define ISDN_GLOBAL_STOPPED 1 - -/*=================== Start of ip-over-ISDN stuff =========================*/ - -/* Feature- and status-flags for a net-interface */ -#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */ -#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */ -#define ISDN_NET_CALLBACK 0x04 /* activate callback */ -#define ISDN_NET_CBHUP 0x08 /* hangup before callback */ -#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */ - -#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */ - -/* Phone-list-element */ -typedef struct { - void *next; - char num[ISDN_MSNLEN]; -} isdn_net_phone; - -/* - Principles when extending structures for generic encapsulation protocol - ("concap") support: - - Stuff which is hardware specific (here i4l-specific) goes in - the netdev -> local structure (here: isdn_net_local) - - Stuff which is encapsulation protocol specific goes in the structure - which holds the linux device structure (here: isdn_net_device) -*/ - -/* Local interface-data */ -typedef struct isdn_net_local_s { - ulong magic; - struct net_device_stats stats; /* Ethernet Statistics */ - int isdn_device; /* Index to isdn-device */ - int isdn_channel; /* Index to isdn-channel */ - int ppp_slot; /* PPPD device slot number */ - int pre_device; /* Preselected isdn-device */ - int pre_channel; /* Preselected isdn-channel */ - int exclusive; /* If non-zero idx to reserved chan.*/ - int flags; /* Connection-flags */ - int dialretry; /* Counter for Dialout-retries */ - int dialmax; /* Max. 
Number of Dial-retries */ - int cbdelay; /* Delay before Callback starts */ - int dtimer; /* Timeout-counter for dialing */ - char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */ - u_char cbhup; /* Flag: Reject Call before Callback*/ - u_char dialstate; /* State for dialing */ - u_char p_encap; /* Packet encapsulation */ - /* 0 = Ethernet over ISDN */ - /* 1 = RAW-IP */ - /* 2 = IP with type field */ - u_char l2_proto; /* Layer-2-protocol */ - /* See ISDN_PROTO_L2..-constants in */ - /* isdnif.h */ - /* 0 = X75/LAPB with I-Frames */ - /* 1 = X75/LAPB with UI-Frames */ - /* 2 = X75/LAPB with BUI-Frames */ - /* 3 = HDLC */ - u_char l3_proto; /* Layer-3-protocol */ - /* See ISDN_PROTO_L3..-constants in */ - /* isdnif.h */ - /* 0 = Transparent */ - int huptimer; /* Timeout-counter for auto-hangup */ - int charge; /* Counter for charging units */ - ulong chargetime; /* Timer for Charging info */ - int hupflags; /* Flags for charge-unit-hangup: */ - /* bit0: chargeint is invalid */ - /* bit1: Getting charge-interval */ - /* bit2: Do charge-unit-hangup */ - /* bit3: Do hangup even on incoming */ - int outgoing; /* Flag: outgoing call */ - int onhtime; /* Time to keep link up */ - int chargeint; /* Interval between charge-infos */ - int onum; /* Flag: at least 1 outgoing number */ - int cps; /* current speed of this interface */ - int transcount; /* byte-counter for cps-calculation */ - int sqfull; /* Flag: netdev-queue overloaded */ - ulong sqfull_stamp; /* Start-Time of overload */ - ulong slavedelay; /* Dynamic bundling delaytime */ - int triggercps; /* BogoCPS needed for trigger slave */ - isdn_net_phone *phone[2]; /* List of remote-phonenumbers */ - /* phone[0] = Incoming Numbers */ - /* phone[1] = Outgoing Numbers */ - isdn_net_phone *dial; /* Pointer to dialed number */ - struct net_device *master; /* Ptr to Master device for slaves */ - struct net_device *slave; /* Ptr to Slave device for masters */ - struct isdn_net_local_s *next; /* Ptr to next link in bundle */ - struct isdn_net_local_s *last; /* Ptr to last link in bundle */ - struct isdn_net_dev_s *netdev; /* Ptr to netdev */ - struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ - /* be transmitted asap */ - atomic_t frame_cnt; /* number of frames currently */ - /* queued in HL driver */ - /* Ptr to orig. hard_header_cache */ - spinlock_t xmit_lock; /* used to protect the xmit path of */ - /* a particular channel (including */ - /* the frame_cnt */ - - int pppbind; /* ippp device for bindings */ - int dialtimeout; /* How long shall we try on dialing? (jiffies) */ - int dialwait; /* How long shall we wait after failed attempt? (jiffies) */ - ulong dialstarted; /* jiffies of first dialing-attempt */ - ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */ - int huptimeout; /* How long will the connection be up? (seconds) */ -#ifdef CONFIG_ISDN_X25 - struct concap_device_ops *dops; /* callbacks used by encapsulator */ -#endif - /* use an own struct for that in later versions */ - ulong cisco_myseq; /* Local keepalive seq. for Cisco */ - ulong cisco_mineseen; /* returned keepalive seq. from remote */ - ulong cisco_yourseq; /* Remote keepalive seq. 
for Cisco */ - int cisco_keepalive_period; /* keepalive period */ - ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */ - char cisco_line_state; /* state of line according to keepalive packets */ - char cisco_debserint; /* debugging flag of cisco hdlc with slarp */ - struct timer_list cisco_timer; - struct work_struct tqueue; -} isdn_net_local; - -/* the interface itself */ -typedef struct isdn_net_dev_s { - isdn_net_local *local; - isdn_net_local *queue; /* circular list of all bundled - channels, which are currently - online */ - spinlock_t queue_lock; /* lock to protect queue */ - void *next; /* Pointer to next isdn-interface */ - struct net_device *dev; /* interface to upper levels */ -#ifdef CONFIG_ISDN_PPP - ippp_bundle * pb; /* pointer to the common bundle structure - * with the per-bundle data */ -#endif -#ifdef CONFIG_ISDN_X25 - struct concap_proto *cprot; /* connection oriented encapsulation protocol */ -#endif - -} isdn_net_dev; - -/*===================== End of ip-over-ISDN stuff ===========================*/ - -/*======================= Start of ISDN-tty stuff ===========================*/ - -#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ -#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */ -#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ - -#ifdef CONFIG_ISDN_AUDIO -/* For using sk_buffs with audio we need some private variables - * within each sk_buff. For this purpose, we declare a struct here, - * and put it always at the private skb->cb data array. A few macros help - * accessing the variables. - */ -typedef struct _isdn_audio_data { - unsigned short dle_count; - unsigned char lock; -} isdn_audio_data_t; - -#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count) -#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock) -#endif - -/* Private data of AT-command-interpreter */ -typedef struct atemu { - u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. 
Profile 0 */ - u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */ - char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */ - char msn[ISDN_MSNLEN]; /* EAZ/MSN */ - char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */ - char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */ - char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */ - char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */ -#ifdef CONFIG_ISDN_AUDIO - u_char vpar[10]; /* Voice-parameters */ - int lastDLE; /* Flag for voice-coding: DLE seen */ -#endif - int mdmcmdl; /* Length of Modem-Commandbuffer */ - int pluscount; /* Counter for +++ sequence */ - u_long lastplus; /* Timestamp of last + */ - int carrierwait; /* Seconds of carrier waiting */ - char mdmcmd[255]; /* Modem-Commandbuffer */ - unsigned int charge; /* Charge units of current connection */ -} atemu; - -/* Private data (similar to async_struct in <linux/serial.h>) */ -typedef struct modem_info { - int magic; - struct tty_port port; - int x_char; /* xon/xoff character */ - int mcr; /* Modem control register */ - int msr; /* Modem status register */ - int lsr; /* Line status register */ - int line; - int online; /* 1 = B-Channel is up, drop data */ - /* 2 = B-Channel is up, deliver d.*/ - int dialing; /* Dial in progress or ATA */ - int closing; - int rcvsched; /* Receive needs schedule */ - int isdn_driver; /* Index to isdn-driver */ - int isdn_channel; /* Index to isdn-channel */ - int drv_index; /* Index to dev->usage */ - int ncarrier; /* Flag: schedule NO CARRIER */ - unsigned char last_cause[8]; /* Last cause message */ - unsigned char last_num[ISDN_MSNLEN]; - /* Last phone-number */ - unsigned char last_l2; /* Last layer-2 protocol */ - unsigned char last_si; /* Last service */ - unsigned char last_lhup; /* Last hangup local? */ - unsigned char last_dir; /* Last direction (in or out) */ - struct timer_list nc_timer; /* Timer for delayed NO CARRIER */ - int send_outstanding;/* # of outstanding send-requests */ - int xmit_size; /* max. # of chars in xmit_buf */ - int xmit_count; /* # of chars in xmit_buf */ - struct sk_buff_head xmit_queue; /* transmit queue */ - atomic_t xmit_lock; /* Semaphore for isdn_tty_write */ -#ifdef CONFIG_ISDN_AUDIO - int vonline; /* Voice-channel status */ - /* Bit 0 = recording */ - /* Bit 1 = playback */ - /* Bit 2 = playback, DLE-ETX seen */ - struct sk_buff_head dtmf_queue; /* queue for dtmf results */ - void *adpcms; /* state for adpcm decompression */ - void *adpcmr; /* state for adpcm compression */ - void *dtmf_state; /* state for dtmf decoder */ - void *silence_state; /* state for silence detection */ -#endif -#ifdef CONFIG_ISDN_TTY_FAX - struct T30_s *fax; /* T30 Fax Group 3 data/interface */ - int faxonline; /* Fax-channel status */ -#endif - atemu emu; /* AT-emulator data */ - spinlock_t readlock; -} modem_info; - -#define ISDN_MODEM_WINSIZE 8 - -/* Description of one ISDN-tty */ -typedef struct _isdn_modem { - int refcount; /* Number of opens */ - struct tty_driver *tty_modem; /* tty-device */ - struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? 
copied from Orig */ - struct ktermios *modem_termios[ISDN_MAX_CHANNELS]; - struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS]; - modem_info info[ISDN_MAX_CHANNELS]; /* Private data */ -} isdn_modem_t; - -/*======================= End of ISDN-tty stuff ============================*/ - -/*======================== Start of V.110 stuff ============================*/ -#define V110_BUFSIZE 1024 - -typedef struct { - int nbytes; /* 1 Matrixbyte -> nbytes in stream */ - int nbits; /* Number of used bits in streambyte */ - unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */ - int decodelen; /* Amount of data in decodebuf */ - int SyncInit; /* Number of sync frames to send */ - unsigned char *OnlineFrame; /* Precalculated V110 idle frame */ - unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */ - int framelen; /* Length of frames */ - int skbuser; /* Number of unacked userdata skbs */ - int skbidle; /* Number of unacked idle/sync skbs */ - int introducer; /* Local vars for decoder */ - int dbit; - unsigned char b; - int skbres; /* space to reserve in outgoing skb */ - int maxsize; /* maxbufsize of lowlevel driver */ - unsigned char *encodebuf; /* temporary buffer for encoding */ - unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */ -} isdn_v110_stream; - -/*========================= End of V.110 stuff =============================*/ - -/*======================= Start of general stuff ===========================*/ - -typedef struct { - char *next; - char *private; -} infostruct; - -#define DRV_FLAG_RUNNING 1 -#define DRV_FLAG_REJBUS 2 -#define DRV_FLAG_LOADED 4 - -/* Description of hardware-level-driver */ -typedef struct _isdn_driver { - ulong online; /* Channel-Online flags */ - ulong flags; /* Misc driver Flags */ - int locks; /* Number of locks for this driver */ - int channels; /* Number of channels */ - wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */ - int maxbufsize; /* Maximum Buffersize supported */ - unsigned long pktcount; /* Until now: unused */ - int stavail; /* Chars avail on Status-device */ - isdn_if *interface; /* Interface to driver */ - int *rcverr; /* Error-counters for B-Ch.-receive */ - int *rcvcount; /* Byte-counters for B-Ch.-receive */ -#ifdef CONFIG_ISDN_AUDIO - unsigned long DLEflag; /* Flags: Insert DLE at next read */ -#endif - struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */ - wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */ - wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */ - char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */ -} isdn_driver_t; - -/* Main driver-data */ -typedef struct isdn_devt { - struct module *owner; - spinlock_t lock; - unsigned short flags; /* Bitmapped Flags: */ - int drivers; /* Current number of drivers */ - int channels; /* Current number of channels */ - int net_verbose; /* Verbose-Flag */ - int modempoll; /* Flag: tty-read active */ - spinlock_t timerlock; - int tflags; /* Timer-Flags: */ - /* see ISDN_TIMER_..defines */ - int global_flags; - infostruct *infochain; /* List of open info-devs. 
*/ - wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */ - struct timer_list timer; /* Misc.-function Timer */ - int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */ - int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */ - int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */ - char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN]; - /* Remote number of active ch.*/ - int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */ - isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */ - isdn_net_dev *netdev; /* Linked list of net-if's */ - char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */ - struct task_struct *profd; /* For iprofd */ - isdn_modem_t mdm; /* tty-driver-data */ - isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */ - isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */ - ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */ - ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */ - int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */ - atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */ - isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */ - struct mutex mtx; /* serialize list access*/ - unsigned long global_features; -} isdn_dev; - -extern isdn_dev *dev; - - -#endif /* __ISDN_H__ */ diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h deleted file mode 100644 index fe2c1279c139..000000000000 --- a/include/linux/isdn/hdlc.h +++ /dev/null @@ -1,69 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * hdlc.h -- General purpose ISDN HDLC decoder. - * - * Implementation of a HDLC decoder/encoder in software. - * Necessary because some ISDN devices don't have HDLC - * controllers. - * - * Copyright (C) - * 2009 Karsten Keil <keil@b1-systems.de> - * 2002 Wolfgang Mües <wolfgang@iksw-muees.de> - * 2001 Frode Isaksen <fisaksen@bewan.com> - * 2001 Kai Germaschewski <kai.germaschewski@gmx.de> - */ - -#ifndef __ISDNHDLC_H__ -#define __ISDNHDLC_H__ - -struct isdnhdlc_vars { - int bit_shift; - int hdlc_bits1; - int data_bits; - int ffbit_shift; /* encoding only */ - int state; - int dstpos; - - u16 crc; - - u8 cbin; - u8 shift_reg; - u8 ffvalue; - - /* set if transferring data */ - u32 data_received:1; - /* set if D channel (send idle instead of flags) */ - u32 dchannel:1; - /* set if 56K adaptation */ - u32 do_adapt56:1; - /* set if in closing phase (need to send CRC + flag) */ - u32 do_closing:1; - /* set if data is bitreverse */ - u32 do_bitreverse:1; -}; - -/* Feature Flags */ -#define HDLC_56KBIT 0x01 -#define HDLC_DCHANNEL 0x02 -#define HDLC_BITREVERSE 0x04 - -/* - The return value from isdnhdlc_decode is - the frame length, 0 if no complete frame was decoded, - or a negative error number -*/ -#define HDLC_FRAMING_ERROR 1 -#define HDLC_CRC_ERROR 2 -#define HDLC_LENGTH_ERROR 3 - -extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); - -extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, - int slen, int *count, u8 *dst, int dsize); - -extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); - -extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, - u16 slen, int *count, u8 *dst, int dsize); - -#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h deleted file mode 100644 index 19ab361f9f07..000000000000 --- a/include/linux/isdn_divertif.h +++ /dev/null @@ -1,35 +0,0 @@ -/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp 
$ - * - * Header for the diversion supplementary interface for i4l. - * - * Author Werner Cornelius (werner@titro.de) - * Copyright by Werner Cornelius (werner@titro.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef _LINUX_ISDN_DIVERTIF_H -#define _LINUX_ISDN_DIVERTIF_H - -#include <linux/isdnif.h> -#include <linux/types.h> -#include <uapi/linux/isdn_divertif.h> - -/***************************************************************/ -/* structure exchanging data between isdn hl and divert module */ -/***************************************************************/ -typedef struct - { ulong if_magic; /* magic info and version */ - int cmd; /* command */ - int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */ - int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */ - char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */ - int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */ - } isdn_divert_if; - -/*********************/ -/* function register */ -/*********************/ -extern int DIVERT_REG_NAME(isdn_divert_if *); -#endif /* _LINUX_ISDN_DIVERTIF_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h deleted file mode 100644 index a0070c6dfaf8..000000000000 --- a/include/linux/isdn_ppp.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Linux ISDN subsystem, sync PPP, interface to ipppd - * - * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef _LINUX_ISDN_PPP_H -#define _LINUX_ISDN_PPP_H - - - - -#ifdef CONFIG_IPPP_FILTER -#include <linux/filter.h> -#endif -#include <uapi/linux/isdn_ppp.h> - -#define DECOMP_ERR_NOMEM (-10) - -#define MP_END_FRAG 0x40 -#define MP_BEGIN_FRAG 0x80 - -#define MP_MAX_QUEUE_LEN 16 - -/* - * We need a way for the decompressor to influence the generation of CCP - * Reset-Requests in a variety of ways. The decompressor is already returning - * a lot of information (generated skb length, error conditions) so we use - * another parameter. This parameter is a pointer to a structure which is - * to be marked valid by the decompressor and only in this case is ever used. - * Furthermore, the only case where this data is used is when the decom- - * pressor returns DECOMP_ERROR. - * - * We use this same struct for the reset entry of the compressor to commu- - * nicate to its caller how to deal with sending of a Reset Ack. In this - * case, expra is not used, but other options still apply (suppressing - * sending with rsend, appending arbitrary data, etc). - */ - -#define IPPP_RESET_MAXDATABYTES 32 - -struct isdn_ppp_resetparams { - unsigned char valid:1; /* rw Is this structure filled at all ? */ - unsigned char rsend:1; /* rw Should we send one at all ? */ - unsigned char idval:1; /* rw Is the id field valid ? */ - unsigned char dtval:1; /* rw Is the data field valid ? */ - unsigned char expra:1; /* rw Is an Ack expected for this Req ? 
*/ - unsigned char id; /* wo Send CCP ResetReq with this id */ - unsigned short maxdlen; /* ro Max bytes to be stored in data field */ - unsigned short dlen; /* rw Bytes stored in data field */ - unsigned char *data; /* wo Data for ResetReq info field */ -}; - -/* - * this is an 'old friend' from ppp-comp.h under a new name - * check the original include for more information - */ -struct isdn_ppp_compressor { - struct isdn_ppp_compressor *next, *prev; - struct module *owner; - int num; /* CCP compression protocol number */ - - void *(*alloc) (struct isdn_ppp_comp_data *); - void (*free) (void *state); - int (*init) (void *state, struct isdn_ppp_comp_data *, - int unit,int debug); - - /* The reset entry needs to get more exact information about the - ResetReq or ResetAck it was called with. The parameters are - obvious. If reset is called without a Req or Ack frame which - could be handed into it, code MUST be set to 0. Using rsparm, - the reset entry can control if and how a ResetAck is returned. */ - - void (*reset) (void *state, unsigned char code, unsigned char id, - unsigned char *data, unsigned len, - struct isdn_ppp_resetparams *rsparm); - - int (*compress) (void *state, struct sk_buff *in, - struct sk_buff *skb_out, int proto); - - int (*decompress) (void *state,struct sk_buff *in, - struct sk_buff *skb_out, - struct isdn_ppp_resetparams *rsparm); - - void (*incomp) (void *state, struct sk_buff *in,int proto); - void (*stat) (void *state, struct compstat *stats); -}; - -extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *); -extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *); -extern int isdn_ppp_dial_slave(char *); -extern int isdn_ppp_hangup_slave(char *); - -typedef struct { - unsigned long seqerrs; - unsigned long frame_drops; - unsigned long overflows; - unsigned long max_queue_len; -} isdn_mppp_stats; - -typedef struct { - int mp_mrru; /* unused */ - struct sk_buff * frags; /* fragments sl list -- use skb->next */ - long frames; /* number of frames in the frame list */ - unsigned int seq; /* last processed packet seq #: any packets - * with smaller seq # will be dropped - * unconditionally */ - spinlock_t lock; - int ref_ct; - /* statistics */ - isdn_mppp_stats stats; -} ippp_bundle; - -#define NUM_RCV_BUFFS 64 - -struct ippp_buf_queue { - struct ippp_buf_queue *next; - struct ippp_buf_queue *last; - char *buf; /* NULL here indicates end of queue */ - int len; -}; - -/* The data structure for one CCP reset transaction */ -enum ippp_ccp_reset_states { - CCPResetIdle, - CCPResetSentReq, - CCPResetRcvdReq, - CCPResetSentAck, - CCPResetRcvdAck -}; - -struct ippp_ccp_reset_state { - enum ippp_ccp_reset_states state; /* State of this transaction */ - struct ippp_struct *is; /* Backlink to device stuff */ - unsigned char id; /* Backlink id index */ - unsigned char ta:1; /* The timer is active (flag) */ - unsigned char expra:1; /* We expect a ResetAck at all */ - int dlen; /* Databytes stored in data */ - struct timer_list timer; /* For timeouts/retries */ - /* This is a hack but seems sufficient for the moment. We do not want - to have this be yet another allocation for some bytes, it is more - memory management overhead than the whole mess is worth. */ - unsigned char data[IPPP_RESET_MAXDATABYTES]; -}; - -/* The data structure keeping track of the currently outstanding CCP Reset - transactions. 
*/ -struct ippp_ccp_reset { - struct ippp_ccp_reset_state *rs[256]; /* One per possible id */ - unsigned char lastid; /* Last id allocated by the engine */ -}; - -struct ippp_struct { - struct ippp_struct *next_link; - int state; - spinlock_t buflock; - struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ - struct ippp_buf_queue *first; /* pointer to (current) first packet */ - struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */ - wait_queue_head_t wq; - struct task_struct *tk; - unsigned int mpppcfg; - unsigned int pppcfg; - unsigned int mru; - unsigned int mpmru; - unsigned int mpmtu; - unsigned int maxcid; - struct isdn_net_local_s *lp; - int unit; - int minor; - unsigned int last_link_seqno; - long mp_seqno; -#ifdef CONFIG_ISDN_PPP_VJ - unsigned char *cbuf; - struct slcompress *slcomp; -#endif -#ifdef CONFIG_IPPP_FILTER - struct bpf_prog *pass_filter; /* filter for packets to pass */ - struct bpf_prog *active_filter; /* filter for pkts to reset idle */ -#endif - unsigned long debug; - struct isdn_ppp_compressor *compressor,*decompressor; - struct isdn_ppp_compressor *link_compressor,*link_decompressor; - void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat; - struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */ - unsigned long compflags; -}; - -#endif /* _LINUX_ISDN_PPP_H */ diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h deleted file mode 100644 index 8d80fdc68647..000000000000 --- a/include/linux/isdnif.h +++ /dev/null @@ -1,505 +0,0 @@ -/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ - * - * Linux ISDN subsystem - * Definition of the interface between the subsystem and its low-level drivers. - * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef __ISDNIF_H__ -#define __ISDNIF_H__ - - -#include <linux/skbuff.h> -#include <uapi/linux/isdnif.h> - -/***************************************************************************/ -/* Extensions made by Werner Cornelius (werner@ikt.de) */ -/* */ -/* The proceed command holds a incoming call in a state to leave processes */ -/* enough time to check whether ist should be accepted. */ -/* The PROT_IO Command extends the interface to make protocol dependent */ -/* features available (call diversion, call waiting...). */ -/* */ -/* The PROT_IO Command is executed with the desired driver id and the arg */ -/* parameter coded as follows: */ -/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */ -/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/ -/* Any additional data is protocol and command specific. */ -/* This mechanism also applies to the statcallb callback STAT_PROT. */ -/* */ -/* This suggested extension permits an easy expansion of protocol specific */ -/* handling. Extensions may be added at any time without changing the HL */ -/* driver code and not getting conflicts without certifications. */ -/* The well known CAPI 2.0 interface handles such extensions in a similar */ -/* way. Perhaps a protocol specific module may be added and separately */ -/* loaded and linked to the basic isdn module for handling. 
*/ -/***************************************************************************/ - -/*****************/ -/* DSS1 commands */ -/*****************/ -#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */ -#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */ - -/*******************************/ -/* DSS1 Status callback values */ -/*******************************/ -#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */ -#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */ -#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */ - - -/*********************************************************************/ -/* structures for DSS1 commands and callback */ -/* */ -/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/ -/* timeout, datalen and data fields must be set before calling. */ -/* */ -/* The return value is a positive hl_id value also delivered in the */ -/* hl_id field. A value of zero signals no more left hl_id capacitys.*/ -/* A negative return value signals errors in LL. So if the return */ -/* value is <= 0 no action in LL will be taken -> request ignored */ -/* */ -/* The timeout field must be filled with a positive value specifying */ -/* the amount of time the INVOKED process waits for a reaction from */ -/* the network. */ -/* If a response (either error or result) is received during this */ -/* intervall, a reporting callback is initiated and the process will */ -/* be deleted, the hl identifier will be freed. */ -/* If no response is received during the specified intervall, a error*/ -/* callback is initiated with timeout set to -1 and a datalen set */ -/* to 0. */ -/* If timeout is set to a value <= 0 during INVOCATION the process is*/ -/* immediately deleted after sending the data. No callback occurs ! */ -/* */ -/* A currently waiting process may be aborted with INVOKE_ABORT. No */ -/* callback will occur when a process has been aborted. */ -/* */ -/* Broadcast invoke frames from the network are reported via the */ -/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */ -/* are supplied by the network and not by the HL. 
*/ -/*********************************************************************/ - -/*****************/ -/* NI1 commands */ -/*****************/ -#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */ -#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */ - -/*******************************/ -/* NI1 Status callback values */ -/*******************************/ -#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */ -#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */ -#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */ - -typedef struct - { ulong ll_id; /* ID supplied by LL when executing */ - /* a command and returned by HL for */ - /* INVOKE_RES and INVOKE_ERR */ - int hl_id; /* ID supplied by HL when called */ - /* for executing a cmd and delivered */ - /* for results and errors */ - /* must be supplied by LL when aborting*/ - int proc; /* invoke procedure used by CMD_INVOKE */ - /* returned by callback and broadcast */ - int timeout; /* timeout for INVOKE CMD in ms */ - /* -1 in stat callback when timed out */ - /* error value when error callback */ - int datalen; /* length of cmd or stat data */ - u_char *data;/* pointer to data delivered or send */ - } isdn_cmd_stat; - -/* - * Commands from linklevel to lowlevel - * - */ -#define ISDN_CMD_IOCTL 0 /* Perform ioctl */ -#define ISDN_CMD_DIAL 1 /* Dial out */ -#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */ -#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */ -#define ISDN_CMD_HANGUP 4 /* Hangup */ -#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */ -#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */ -#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */ -#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */ -#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */ -#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */ -#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */ -#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */ -#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */ -// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */ -// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */ -#define ISDN_CMD_SUSPEND 16 /* Suspend connection */ -#define ISDN_CMD_RESUME 17 /* Resume connection */ -#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */ -#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */ -#define ISDN_CMD_REDIR 20 /* Redir a incoming call */ -#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */ -#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */ -#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */ -#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */ - -/* - * Status-Values delivered from lowlevel to linklevel via - * statcallb(). 
- * - */ -#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */ -#define ISDN_STAT_ICALL 257 /* Incoming call detected */ -#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */ -#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */ -#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */ -#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */ -#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */ -#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */ -#define ISDN_STAT_CINF 264 /* Charge-Info */ -#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */ -#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */ -#define ISDN_STAT_BSENT 267 /* Signal packet sent */ -#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */ -#define ISDN_STAT_ADDCH 269 /* Add more Channels */ -#define ISDN_STAT_CAUSE 270 /* Cause-Message */ -#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */ -#define ISDN_STAT_REDIR 272 /* Redir result */ -#define ISDN_STAT_PROT 273 /* protocol IO specific callback */ -#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */ -#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */ -#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */ -#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */ -#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */ - -/* - * Audio commands - */ -#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */ -#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */ - -/* - * Values for errcode field - */ -#define ISDN_STAT_L1ERR_SEND 1 -#define ISDN_STAT_L1ERR_RECV 2 - -/* - * Values for feature-field of interface-struct. - */ -/* Layer 2 */ -#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I) -#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI) -#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI) -#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC) -#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS) -#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE) -#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE) -#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096) -#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019) -#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038) -#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM) -#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX) -#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K) - -#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */ -#define ISDN_FEATURE_L2_SHIFT (0) - -/* Layer 3 */ -#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS) -#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP) -#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2) -#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1) - -#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */ -#define ISDN_FEATURE_L3_SHIFT (16) - -/* Signaling */ -#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN) -#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6) -#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO) -#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1) - -#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 
8 Protocols */ -#define ISDN_FEATURE_P_SHIFT (24) - -typedef struct setup_parm { - unsigned char phone[32]; /* Remote Phone-Number */ - unsigned char eazmsn[32]; /* Local EAZ or MSN */ - unsigned char si1; /* Service Indicator 1 */ - unsigned char si2; /* Service Indicator 2 */ - unsigned char plan; /* Numbering plan */ - unsigned char screen; /* Screening info */ -} setup_parm; - - -#ifdef CONFIG_ISDN_TTY_FAX -/* T.30 Fax G3 */ - -#define FAXIDLEN 21 - -typedef struct T30_s { - /* session parameters */ - __u8 resolution; - __u8 rate; - __u8 width; - __u8 length; - __u8 compression; - __u8 ecm; - __u8 binary; - __u8 scantime; - __u8 id[FAXIDLEN]; - /* additional parameters */ - __u8 phase; - __u8 direction; - __u8 code; - __u8 badlin; - __u8 badmul; - __u8 bor; - __u8 fet; - __u8 pollid[FAXIDLEN]; - __u8 cq; - __u8 cr; - __u8 ctcrty; - __u8 minsp; - __u8 phcto; - __u8 rel; - __u8 nbc; - /* remote station parameters */ - __u8 r_resolution; - __u8 r_rate; - __u8 r_width; - __u8 r_length; - __u8 r_compression; - __u8 r_ecm; - __u8 r_binary; - __u8 r_scantime; - __u8 r_id[FAXIDLEN]; - __u8 r_code; -} __packed T30_s; - -#define ISDN_TTY_FAX_CONN_IN 0 -#define ISDN_TTY_FAX_CONN_OUT 1 - -#define ISDN_TTY_FAX_FCON 0 -#define ISDN_TTY_FAX_DIS 1 -#define ISDN_TTY_FAX_FTT 2 -#define ISDN_TTY_FAX_MCF 3 -#define ISDN_TTY_FAX_DCS 4 -#define ISDN_TTY_FAX_TRAIN_OK 5 -#define ISDN_TTY_FAX_EOP 6 -#define ISDN_TTY_FAX_EOM 7 -#define ISDN_TTY_FAX_MPS 8 -#define ISDN_TTY_FAX_DTC 9 -#define ISDN_TTY_FAX_RID 10 -#define ISDN_TTY_FAX_HNG 11 -#define ISDN_TTY_FAX_DT 12 -#define ISDN_TTY_FAX_FCON_I 13 -#define ISDN_TTY_FAX_DR 14 -#define ISDN_TTY_FAX_ET 15 -#define ISDN_TTY_FAX_CFR 16 -#define ISDN_TTY_FAX_PTS 17 -#define ISDN_TTY_FAX_SENT 18 - -#define ISDN_FAX_PHASE_IDLE 0 -#define ISDN_FAX_PHASE_A 1 -#define ISDN_FAX_PHASE_B 2 -#define ISDN_FAX_PHASE_C 3 -#define ISDN_FAX_PHASE_D 4 -#define ISDN_FAX_PHASE_E 5 - -#endif /* TTY_FAX */ - -#define ISDN_FAX_CLASS1_FAE 0 -#define ISDN_FAX_CLASS1_FTS 1 -#define ISDN_FAX_CLASS1_FRS 2 -#define ISDN_FAX_CLASS1_FTM 3 -#define ISDN_FAX_CLASS1_FRM 4 -#define ISDN_FAX_CLASS1_FTH 5 -#define ISDN_FAX_CLASS1_FRH 6 -#define ISDN_FAX_CLASS1_CTRL 7 - -#define ISDN_FAX_CLASS1_OK 0 -#define ISDN_FAX_CLASS1_CONNECT 1 -#define ISDN_FAX_CLASS1_NOCARR 2 -#define ISDN_FAX_CLASS1_ERROR 3 -#define ISDN_FAX_CLASS1_FCERROR 4 -#define ISDN_FAX_CLASS1_QUERY 5 - -typedef struct { - __u8 cmd; - __u8 subcmd; - __u8 para[50]; -} aux_s; - -#define AT_COMMAND 0 -#define AT_EQ_VALUE 1 -#define AT_QUERY 2 -#define AT_EQ_QUERY 3 - -/* CAPI structs */ - -/* this is compatible to the old union size */ -#define MAX_CAPI_PARA_LEN 50 - -typedef struct { - /* Header */ - __u16 Length; - __u16 ApplId; - __u8 Command; - __u8 Subcommand; - __u16 Messagenumber; - - /* Parameter */ - union { - __u32 Controller; - __u32 PLCI; - __u32 NCCI; - } adr; - __u8 para[MAX_CAPI_PARA_LEN]; -} capi_msg; - -/* - * Structure for exchanging above infos - * - */ -typedef struct { - int driver; /* Lowlevel-Driver-ID */ - int command; /* Command or Status (see above) */ - ulong arg; /* Additional Data */ - union { - ulong errcode; /* Type of error with STAT_L1ERR */ - int length; /* Amount of bytes sent with STAT_BSENT */ - u_char num[50]; /* Additional Data */ - setup_parm setup;/* For SETUP msg */ - capi_msg cmsg; /* For CAPI like messages */ - char display[85];/* display message data */ - isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */ - aux_s aux; /* for modem commands/indications */ -#ifdef CONFIG_ISDN_TTY_FAX - T30_s 
*fax; /* Pointer to ttys fax struct */ -#endif - ulong userdata; /* User Data */ - } parm; -} isdn_ctrl; - -#define dss1_io isdn_io -#define ni1_io isdn_io - -/* - * The interface-struct itself (initialized at load-time of lowlevel-driver) - * - * See Documentation/isdn/INTERFACE for a description, how the communication - * between the ISDN subsystem and its drivers is done. - * - */ -typedef struct { - struct module *owner; - - /* Number of channels supported by this driver - */ - int channels; - - /* - * Maximum Size of transmit/receive-buffer this driver supports. - */ - int maxbufsize; - - /* Feature-Flags for this driver. - * See defines ISDN_FEATURE_... for Values - */ - unsigned long features; - - /* - * Needed for calculating - * dev->hard_header_len = linklayer header + hl_hdrlen; - * Drivers, not supporting sk_buff's should set this to 0. - */ - unsigned short hl_hdrlen; - - /* - * Receive-Callback using sk_buff's - * Parameters: - * int Driver-ID - * int local channel-number (0 ...) - * struct sk_buff *skb received Data - */ - void (*rcvcallb_skb)(int, int, struct sk_buff *); - - /* Status-Callback - * Parameters: - * isdn_ctrl* - * driver = Driver ID. - * command = One of above ISDN_STAT_... constants. - * arg = depending on status-type. - * num = depending on status-type. - */ - int (*statcallb)(isdn_ctrl*); - - /* Send command - * Parameters: - * isdn_ctrl* - * driver = Driver ID. - * command = One of above ISDN_CMD_... constants. - * arg = depending on command. - * num = depending on command. - */ - int (*command)(isdn_ctrl*); - - /* - * Send data using sk_buff's - * Parameters: - * int driverId - * int local channel-number (0...) - * int Flag: Need ACK for this packet. - * struct sk_buff *skb Data to send - */ - int (*writebuf_skb) (int, int, int, struct sk_buff *); - - /* Send raw D-Channel-Commands - * Parameters: - * u_char pointer data - * int length of data - * int driverId - * int local channel-number (0 ...) - */ - int (*writecmd)(const u_char __user *, int, int, int); - - /* Read raw Status replies - * u_char pointer data (volatile) - * int length of buffer - * int driverId - * int local channel-number (0 ...) - */ - int (*readstat)(u_char __user *, int, int, int); - - char id[20]; -} isdn_if; - -/* - * Function which must be called by lowlevel-driver at loadtime with - * the following fields of above struct set: - * - * channels Number of channels that will be supported. - * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not - * supporting sk_buff's should set this to 0. - * command Address of Command-Handler. - * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...) - * writebuf_skb Address of Skbuff-Send-Handler. - * writecmd " " D-Channel " which accepts raw D-Ch-Commands. - * readstat " " D-Channel " which delivers raw Status-Data. - * - * The linklevel-driver fills the following fields: - * - * channels Driver-ID assigned to this driver. (Must be used on all - * subsequent callbacks. - * rcvcallb_skb Address of handler for received Skbuff's. - * statcallb " " " for status-changes. 
- * - */ -extern int register_isdn(isdn_if*); -#include <linux/uaccess.h> - -#endif /* __ISDNIF_H__ */ diff --git a/include/linux/iversion.h b/include/linux/iversion.h index be50ef7cedab..2917ef990d43 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -113,6 +113,30 @@ inode_peek_iversion_raw(const struct inode *inode) } /** + * inode_set_max_iversion_raw - update i_version new value is larger + * @inode: inode to set + * @val: new i_version to set + * + * Some self-managed filesystems (e.g Ceph) will only update the i_version + * value if the new value is larger than the one we already have. + */ +static inline void +inode_set_max_iversion_raw(struct inode *inode, u64 val) +{ + u64 cur, old; + + cur = inode_peek_iversion_raw(inode); + for (;;) { + if (cur > val) + break; + old = atomic64_cmpxchg(&inode->i_version, cur, val); + if (likely(old == cur)) + break; + cur = old; + } +} + +/** * inode_set_iversion - set i_version to a particular value * @inode: inode to set * @val: new i_version value to set diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 5c04181b7c6d..df03825ad1a1 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -451,6 +451,22 @@ struct jbd2_inode { * @i_flags: Flags of inode [j_list_lock] */ unsigned long i_flags; + + /** + * @i_dirty_start: + * + * Offset in bytes where the dirty range for this inode starts. + * [j_list_lock] + */ + loff_t i_dirty_start; + + /** + * @i_dirty_end: + * + * Inclusive offset in bytes where the dirty range for this inode + * ends. [j_list_lock] + */ + loff_t i_dirty_end; }; struct jbd2_revoke_table_s; @@ -1357,7 +1373,6 @@ void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); -extern void journal_sync_buffer (struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); @@ -1397,6 +1412,12 @@ extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_ranged_write(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); +extern int jbd2_journal_inode_ranged_wait(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 8037850f3104..ba2f6a9776b6 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -17,7 +17,7 @@ * if SELF_TEST is defined. You can use this free for any purpose. It's in * the public domain. It has no warranty. * - * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org) * * I've modified Bob's hash to be useful in the Linux kernel, and * any bugs present are my fault. 
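The iversion.h hunk above adds inode_set_max_iversion_raw(), which only ever moves i_version forward. A minimal usage sketch, not part of this patch (the wrapper function name and the server_version argument are hypothetical), showing how a self-managed filesystem such as Ceph might fold a server-supplied change attribute into the local inode:

#include <linux/fs.h>
#include <linux/iversion.h>

/* Hypothetical helper: apply a change attribute received from the server.
 * inode_set_max_iversion_raw() (added by the hunk above) raises i_version
 * only if the new value is larger, so a stale or reordered reply cannot
 * wind the counter backwards. */
static void example_apply_change_attr(struct inode *inode, u64 server_version)
{
	inode_set_max_iversion_raw(inode, server_version);
}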
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3e113a1fa0f1..3526c0aee954 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -215,6 +215,9 @@ extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type); +extern bool arch_jump_label_transform_queue(struct jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_apply(void); extern int jump_label_text_reserved(void *start, void *end); extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index 42710d5949ba..8c3ee291b2d8 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -60,8 +60,6 @@ extern void jump_label_update_timeout(struct work_struct *work); 0), \ } -#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) - #else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; @@ -95,4 +93,7 @@ jump_label_rate_limit(struct static_key_deferred *key, STATIC_KEY_CHECK_USE(key); } #endif /* CONFIG_JUMP_LABEL */ + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index a61dc075e2ce..ac6aba632f2d 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -2,14 +2,43 @@ #ifndef _LINUX_KASAN_CHECKS_H #define _LINUX_KASAN_CHECKS_H -#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL) -void kasan_check_read(const volatile void *p, unsigned int size); -void kasan_check_write(const volatile void *p, unsigned int size); +#include <linux/types.h> + +/* + * __kasan_check_*: Always available when KASAN is enabled. This may be used + * even in compilation units that selectively disable KASAN, but must use KASAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KASAN +bool __kasan_check_read(const volatile void *p, unsigned int size); +bool __kasan_check_write(const volatile void *p, unsigned int size); +#else +static inline bool __kasan_check_read(const volatile void *p, unsigned int size) +{ + return true; +} +static inline bool __kasan_check_write(const volatile void *p, unsigned int size) +{ + return true; +} +#endif + +/* + * kasan_check_*: Only available when the particular compilation unit has KASAN + * instrumentation enabled. May be used in header files. 
+ */ +#ifdef __SANITIZE_ADDRESS__ +#define kasan_check_read __kasan_check_read +#define kasan_check_write __kasan_check_write #else -static inline void kasan_check_read(const volatile void *p, unsigned int size) -{ } -static inline void kasan_check_write(const volatile void *p, unsigned int size) -{ } +static inline bool kasan_check_read(const volatile void *p, unsigned int size) +{ + return true; +} +static inline bool kasan_check_write(const volatile void *p, unsigned int size) +{ + return true; +} #endif #endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b40ea104dd36..cc8a03cc9674 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -76,8 +76,11 @@ void kasan_free_shadow(const struct vm_struct *vm); int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); -size_t ksize(const void *); -static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t __ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) +{ + kasan_unpoison_shadow(ptr, __ksize(ptr)); +} size_t kasan_metadata_size(struct kmem_cache *cache); bool kasan_save_enable_multi_shot(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0c9bc231107f..4fa360a13c1e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -88,6 +88,8 @@ */ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define typeof_member(T, m) typeof(((T*)0)->m) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 331cab70db09..4ded94bcf274 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -70,6 +70,9 @@ struct key_type { */ size_t def_datalen; + unsigned int flags; +#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */ + /* vet a description */ int (*vet_description)(const char *description); diff --git a/include/linux/key.h b/include/linux/key.h index 1c8b88b455ef..91f391cd272e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -31,6 +31,7 @@ typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; +struct net; #ifdef CONFIG_KEYS @@ -77,13 +78,34 @@ struct cred; struct key_type; struct key_owner; +struct key_tag; struct keyring_list; struct keyring_name; +struct key_tag { + struct rcu_head rcu; + refcount_t usage; + bool removed; /* T when subject removed */ +}; + struct keyring_index_key { + /* [!] If this structure is altered, the union in struct key must change too! 
*/ + unsigned long hash; /* Hash value */ + union { + struct { +#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ + u8 desc_len; + char desc[sizeof(long) - 1]; /* First few chars of description */ +#else + char desc[sizeof(long) - 1]; /* First few chars of description */ + u8 desc_len; +#endif + }; + unsigned long x; + }; struct key_type *type; + struct key_tag *domain_tag; /* Domain of operation */ const char *description; - size_t desc_len; }; union key_payload { @@ -197,7 +219,10 @@ struct key { union { struct keyring_index_key index_key; struct { + unsigned long hash; + unsigned long len_desc; struct key_type *type; /* type of key */ + struct key_tag *domain_tag; /* Domain of operation */ char *description; }; }; @@ -248,6 +273,8 @@ extern struct key *key_alloc(struct key_type *type, extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); extern void key_put(struct key *key); +extern bool key_put_tag(struct key_tag *tag); +extern void key_remove_domain(struct key_tag *domain_tag); static inline struct key *__key_get(struct key *key) { @@ -265,26 +292,56 @@ static inline void key_ref_put(key_ref_t key_ref) key_put(key_ref_to_ptr(key_ref)); } -extern struct key *request_key(struct key_type *type, - const char *description, - const char *callout_info); +extern struct key *request_key_tag(struct key_type *type, + const char *description, + struct key_tag *domain_tag, + const char *callout_info); + +extern struct key *request_key_rcu(struct key_type *type, + const char *description, + struct key_tag *domain_tag); extern struct key *request_key_with_auxdata(struct key_type *type, const char *description, + struct key_tag *domain_tag, const void *callout_info, size_t callout_len, void *aux); -extern struct key *request_key_async(struct key_type *type, - const char *description, - const void *callout_info, - size_t callout_len); +/** + * request_key - Request a key and wait for construction + * @type: Type of key. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * + * As for request_key_tag(), but with the default global domain tag. + */ +static inline struct key *request_key(struct key_type *type, + const char *description, + const char *callout_info) +{ + return request_key_tag(type, description, NULL, callout_info); +} -extern struct key *request_key_async_with_auxdata(struct key_type *type, - const char *description, - const void *callout_info, - size_t callout_len, - void *aux); +#ifdef CONFIG_NET +/* + * request_key_net - Request a key for a net namespace and wait for construction + * @type: Type of key. + * @description: The searchable description of the key. + * @net: The network namespace that is the key's domain of operation. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * + * As for request_key() except that it does not add the returned key to a + * keyring if found, new keys are always allocated in the user's quota, the + * callout_info must be a NUL-terminated string and no auxiliary data can be + * passed. Only keys that operate the specified network namespace are used. + * + * Furthermore, it then works as wait_for_key_construction() to wait for the + * completion of keys undergoing construction with a non-interruptible wait. 
+ */ +#define request_key_net(type, description, net, callout_info) \ + request_key_tag(type, description, net->key_domain, callout_info); +#endif /* CONFIG_NET */ extern int wait_for_key_construction(struct key *key, bool intr); @@ -305,6 +362,11 @@ extern int key_update(key_ref_t key, extern int key_link(struct key *keyring, struct key *key); +extern int key_move(struct key *key, + struct key *from_keyring, + struct key *to_keyring, + unsigned int flags); + extern int key_unlink(struct key *keyring, struct key *key); @@ -324,7 +386,8 @@ extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, - const char *description); + const char *description, + bool recurse); extern int keyring_add_key(struct key *keyring, struct key *key); @@ -343,6 +406,7 @@ extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, key_perm_t perm); +extern void key_free_user_ns(struct user_namespace *); /* * The permissions required on a key that we're looking up. @@ -397,8 +461,8 @@ extern struct ctl_table key_sysctls[]; * the userspace interface */ extern int install_thread_keyring_to_cred(struct cred *cred); -extern void key_fsuid_changed(struct task_struct *tsk); -extern void key_fsgid_changed(struct task_struct *tsk); +extern void key_fsuid_changed(struct cred *new_cred); +extern void key_fsgid_changed(struct cred *new_cred); extern void key_init(void); #else /* CONFIG_KEYS */ @@ -413,9 +477,11 @@ extern void key_init(void); #define make_key_ref(k, p) NULL #define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define key_fsuid_changed(t) do { } while(0) -#define key_fsgid_changed(t) do { } while(0) +#define key_fsuid_changed(c) do { } while(0) +#define key_fsgid_changed(c) do { } while(0) #define key_init() do { } while(0) +#define key_free_user_ns(ns) do { } while(0) +#define key_remove_domain(d) do { } while(0) #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 443d9800ca3f..04bdaf01112c 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif +/* Returns true if kprobes handled the fault */ +static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, + unsigned int trap) +{ + if (!kprobes_built_in()) + return false; + if (user_mode(regs)) + return false; + /* + * To be potentially processing a kprobe fault and to be allowed + * to call kprobe_running(), we have to be non-preemptible. 
+ */ + if (preemptible()) + return false; + if (!kprobe_running()) + return false; + return kprobe_fault_handler(regs, trap); +} + #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d1ad38a3f048..fcb46b3374c6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -159,7 +159,7 @@ static inline bool is_error_page(struct page *page) extern struct kmem_cache *kvm_vcpu_cache; -extern spinlock_t kvm_lock; +extern struct mutex kvm_lock; extern struct list_head vm_list; struct kvm_io_range { @@ -318,6 +318,7 @@ struct kvm_vcpu { } spin_loop; #endif bool preempted; + bool ready; struct kvm_vcpu_arch arch; struct dentry *debugfs_dentry; }; @@ -860,17 +861,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); -bool kvm_arch_has_vcpu_debugfs(void); -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#endif int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); -void kvm_arch_check_processor_compat(void *rtn); +int kvm_arch_check_processor_compat(void); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* @@ -990,6 +993,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* * search_memslots() and __gfn_to_memslot() are here because they are diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h new file mode 100644 index 000000000000..5eb111f38803 --- /dev/null +++ b/include/linux/leds-ti-lmu-common.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// TI LMU Common Core +// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + +#ifndef _TI_LMU_COMMON_H_ +#define _TI_LMU_COMMON_H_ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <uapi/linux/uleds.h> + +#define LMU_11BIT_LSB_MASK (BIT(0) | BIT(1) | BIT(2)) +#define LMU_11BIT_MSB_SHIFT 3 + +#define MAX_BRIGHTNESS_8BIT 255 +#define MAX_BRIGHTNESS_11BIT 2047 + +struct ti_lmu_bank { + struct regmap *regmap; + + int max_brightness; + + u8 lsb_brightness_reg; + u8 msb_brightness_reg; + + u8 runtime_ramp_reg; + u32 ramp_up_usec; + u32 ramp_down_usec; +}; + +int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness); + +int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank); + +int ti_lmu_common_get_ramp_params(struct device *dev, + struct fwnode_handle *child, + struct ti_lmu_bank *lmu_data); + +int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child, + struct ti_lmu_bank *lmu_data); + +#endif /* _TI_LMU_COMMON_H_ */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 03d5c3aece9d..7a64b3ddb408 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include 
<linux/uuid.h> #include <linux/spinlock.h> +#include <linux/bio.h> struct badrange_entry { u64 start; @@ -57,6 +58,9 @@ enum { */ ND_REGION_PERSIST_MEMCTRL = 2, + /* Platform provides asynchronous flush mechanism */ + ND_REGION_ASYNC = 3, + /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, }; @@ -113,6 +117,7 @@ struct nd_mapping_desc { int position; }; +struct nd_region; struct nd_region_desc { struct resource *res; struct nd_mapping_desc *mapping; @@ -125,6 +130,7 @@ struct nd_region_desc { int target_node; unsigned long flags; struct device_node *of_node; + int (*flush)(struct nd_region *nd_region, struct bio *bio); }; struct device; @@ -252,10 +258,12 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); -void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_flush(struct nd_region *nd_region, struct bio *bio); +int generic_nvdimm_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_cache(struct nd_region *nd_region); int nvdimm_in_overwrite(struct nvdimm *nvdimm); +bool is_nvdimm_sync(struct nd_region *nd_region); static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) diff --git a/include/linux/list.h b/include/linux/list.h index e951228db4b2..85c92555e31f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -106,6 +106,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) WRITE_ONCE(prev->next, next); } +/* + * Delete a list entry and clear the 'prev' pointer. + * + * This is a special-purpose list clearing method used in the networking code + * for lists allocated as per-cpu, where we don't want to incur the extra + * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this + * needs to check the node 'prev' pointer instead of calling list_empty(). + */ +static inline void __list_del_clearprev(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = NULL; +} + /** * list_del - deletes entry from list. * @entry: the element to delete from the list. diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index eeba421cc671..273400814020 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -35,7 +35,6 @@ * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function - * @kobj_added: @kobj has been added and needs freeing * @nop: temporary patch to use the original code again; dyn. 
allocated * @patched: the func has been added to the klp_ops list * @transition: the func is currently being applied or reverted @@ -113,7 +112,6 @@ struct klp_callbacks { * @node: list node for klp_patch obj_list * @mod: kernel module associated with the patched object * (NULL for vmlinux) - * @kobj_added: @kobj has been added and needs freeing * @dynamic: temporary object for nop functions; dynamically allocated * @patched: the object's funcs have been added to the klp_ops list */ @@ -140,7 +138,6 @@ struct klp_object { * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @obj_list: dynamic list of the object entries - * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) * @forced: was involved in a forced transition * @free_work: patch cleanup from workqueue-context diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c9b422dde542..d294dde9e546 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -282,6 +282,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); void nlmsvc_release_call(struct nlm_rqst *); +void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); /* * File handling for the server personality @@ -289,6 +290,7 @@ void nlmsvc_release_call(struct nlm_rqst *); __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, struct nfs_fh *); void nlm_release_file(struct nlm_file *); +void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); void nlmsvc_free_host_resources(struct nlm_host *); void nlmsvc_invalidate_all(void); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 6e2377e6c1d6..0b0d7259276d 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -5,7 +5,7 @@ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * - * see Documentation/locking/lockdep-design.txt for more details. + * see Documentation/locking/lockdep-design.rst for more details. */ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H @@ -203,11 +203,17 @@ struct lock_list { struct lock_list *parent; }; -/* - * We record lock dependency chains, so that we can cache them: +/** + * struct lock_chain - lock dependency chain record + * + * @irq_context: the same as irq_context in held_lock below + * @depth: the number of held locks in this chain + * @base: the index in chain_hlocks for this chain + * @entry: the collided lock chains in lock_chain hash list + * @chain_key: the hash key of this lock_chain */ struct lock_chain { - /* see BUILD_BUG_ON()s in lookup_chain_cache() */ + /* see BUILD_BUG_ON()s in add_chain_cache() */ unsigned int irq_context : 2, depth : 6, base : 24; @@ -217,12 +223,8 @@ struct lock_chain { }; #define MAX_LOCKDEP_KEYS_BITS 13 -/* - * Subtract one because we offset hlock->class_idx by 1 in order - * to make 0 mean no class. This avoids overflowing the class_idx - * bitfield and hitting the BUG in hlock_class(). 
- */ -#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) +#define INITIAL_CHAIN_KEY -1 struct held_lock { /* @@ -247,6 +249,11 @@ struct held_lock { u64 waittime_stamp; u64 holdtime_stamp; #endif + /* + * class_idx is zero-indexed; it points to the element in + * lock_classes this held lock instance belongs to. class_idx is in + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. + */ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt @@ -281,6 +288,8 @@ extern void lockdep_free_key_range(void *start, unsigned long size); extern asmlinkage void lockdep_sys_exit(void); extern void lockdep_set_selftest_task(struct task_struct *task); +extern void lockdep_init_task(struct task_struct *task); + extern void lockdep_off(void); extern void lockdep_on(void); @@ -385,7 +394,7 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) -#define lockdep_assert_held_exclusive(l) do { \ +#define lockdep_assert_held_write(l) do { \ WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ } while (0) @@ -405,6 +414,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #else /* !CONFIG_LOCKDEP */ +static inline void lockdep_init_task(struct task_struct *task) +{ +} + static inline void lockdep_off(void) { } @@ -466,7 +479,7 @@ struct lockdep_map { }; #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) -#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) +#define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) @@ -497,7 +510,6 @@ enum xhlock_context_t { { .name = (_name), .key = (void *)(_key), } static inline void lockdep_invariant_state(bool force) {} -static inline void lockdep_init_task(struct task_struct *task) {} static inline void lockdep_free_task(struct task_struct *task) {} #ifdef CONFIG_LOCK_STAT @@ -632,11 +644,18 @@ do { \ "IRQs not disabled as expected\n"); \ } while (0) +#define lockdep_assert_in_irq() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + !current->hardirq_context, \ + "Not in hardirq as expected\n"); \ + } while (0) + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) +# define lockdep_assert_in_irq() do { } while (0) #endif #ifdef CONFIG_LOCKDEP diff --git a/include/linux/log2.h b/include/linux/log2.h index 1aec01365ed4..83a4a3ca3e8a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -220,4 +220,38 @@ int __order_base_2(unsigned long n) ilog2((n) - 1) + 1) : \ __order_base_2(n) \ ) + +static inline __attribute__((const)) +int __bits_per(unsigned long n) +{ + if (n < 2) + return 1; + if (is_power_of_2(n)) + return order_base_2(n) + 1; + return order_base_2(n); +} + +/** + * bits_per - calculate the number of bits required for the argument + * @n: parameter + * + * This is constant-capable and can be used for compile time + * initializations, e.g bitfields. + * + * The first few values calculated by this routine: + * bf(0) = 1 + * bf(1) = 1 + * bf(2) = 2 + * bf(3) = 2 + * bf(4) = 3 + * ... and so on. + */ +#define bits_per(n) \ +( \ + __builtin_constant_p(n) ? 
( \ + ((n) == 0 || (n) == 1) \ + ? 1 : ilog2(n) + 1 \ + ) : \ + __bits_per(n) \ +) #endif /* _LINUX_LOG2_H */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 47f58cfb6a19..df1318d85f7d 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -77,7 +77,7 @@ * state. This is called immediately after commit_creds(). * * Security hooks for mount using fs_context. - * [See also Documentation/filesystems/mounting.txt] + * [See also Documentation/filesystems/mount_api.txt] * * @fs_context_dup: * Allocate and attach a security structure to sc->security. This pointer diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 394e3d9213b8..b16e15b9587a 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize); * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * - * Decompresses data fom 'source' into 'dest'. + * Decompresses data from 'source' into 'dest'. * If the source stream is detected malformed, the function will * stop decoding and return a negative result. * This function is protected against buffer overflow exploits, @@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize); /** - * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode + * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data @@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * - * These decoding function allows decompression of multiple blocks + * This decoding function allows decompression of multiple blocks * in "streaming" mode. * Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB) @@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, * which must be already allocated with 'originalSize' bytes * @originalSize: is the original and therefore uncompressed size * - * These decoding function allows decompression of multiple blocks + * This decoding function allows decompression of multiple blocks * in "streaming" mode. * Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB) @@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * - * These decoding function works the same as + * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by * LZ4_decompress_safe_continue() - * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. + * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. 
* * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) @@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest, * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * - * These decoding function works the same as + * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by - * LZ4_decompress_safe_continue() - * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. + * LZ4_decompress_fast_continue() + * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1dcb763bb610..44c41462be33 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -233,8 +233,9 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* memory.events */ + /* memory.events and memory.events.local */ struct cgroup_file events_file; + struct cgroup_file events_local_file; /* handle for "memory.swap.events" */ struct cgroup_file swap_events_file; @@ -281,6 +282,7 @@ struct mem_cgroup { /* memory.events */ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; + atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; unsigned long socket_pressure; @@ -392,7 +394,6 @@ out: struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); -bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); @@ -747,6 +748,9 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { + atomic_long_inc(&memcg->memory_events_local[event]); + cgroup_file_notify(&memcg->events_local_file); + do { atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); @@ -870,12 +874,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, return true; } -static inline bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg) -{ - return true; -} - static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) { return NULL; @@ -1273,6 +1271,8 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge(struct page *page, int order); int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, struct mem_cgroup *memcg); +void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg, + unsigned int nr_pages); extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; @@ -1314,6 +1314,14 @@ static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, return __memcg_kmem_charge_memcg(page, gfp, order, memcg); return 0; } + +static inline void memcg_kmem_uncharge_memcg(struct page *page, int order, + struct mem_cgroup *memcg) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge_memcg(memcg, 1 << order); +} + /* * helper for accessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. 
This function diff --git a/include/linux/memory.h b/include/linux/memory.h index e1dc1bb2b787..02e633f3ede0 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -111,16 +111,15 @@ extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); -int hotplug_memory_register(int nid, struct mem_section *section); -#ifdef CONFIG_MEMORY_HOTREMOVE -extern void unregister_memory_section(struct mem_section *); -#endif +int create_memory_block_devices(unsigned long start, unsigned long size); +void remove_memory_block_devices(unsigned long start, unsigned long size); extern int memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); extern int memory_isolate_notify(unsigned long val, void *v); -extern struct memory_block *find_memory_block_hinted(struct mem_section *, - struct memory_block *); extern struct memory_block *find_memory_block(struct mem_section *); +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); +extern int walk_memory_blocks(unsigned long start, unsigned long size, + void *arg, walk_memory_blocks_func_t func); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ae892eef8b82..f46ea71b4ffd 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -123,20 +123,10 @@ static inline bool movable_node_is_enabled(void) return movable_node_enabled; } -#ifdef CONFIG_MEMORY_HOTREMOVE extern void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); -#endif /* CONFIG_MEMORY_HOTREMOVE */ - -/* - * Do we want sysfs memblock files created. This will allow userspace to online - * and offline memory explicitly. Lack of this bit means that the caller has to - * call move_pfn_range_to_zone to finish the initialization. 
- */ - -#define MHP_MEMBLOCK_API (1<<0) /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, @@ -324,7 +314,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); -extern void remove_memory(int nid, u64 start, u64 size); +extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); #else @@ -341,22 +331,25 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) return -EINVAL; } -static inline void remove_memory(int nid, u64 start, u64 size) {} +static inline int remove_memory(int nid, u64 start, u64 size) +{ + return -EBUSY; +} + static inline void __remove_memory(int nid, u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void __ref free_area_init_core_hotplug(int nid); -extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, - void *arg, int (*func)(struct memory_block *, void *)); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern bool is_memblock_offlined(struct memory_block *mem); -extern int sparse_add_one_section(int nid, unsigned long start_pfn, - struct vmem_altmap *altmap); -extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, +extern int sparse_add_section(int nid, unsigned long pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +extern void sparse_remove_section(struct mem_section *ms, + unsigned long pfn, unsigned long nr_pages, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1732dea030b2..f8a5b2a19945 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -37,13 +37,6 @@ struct vmem_altmap { * A more complete discussion of unaddressable memory may be found in * include/linux/hmm.h and Documentation/vm/hmm.rst. * - * MEMORY_DEVICE_PUBLIC: - * Device memory that is cache coherent from device and CPU point of view. This - * is use on platform that have an advance system bus (like CAPI or CCIX). A - * driver can hotplug the device memory using ZONE_DEVICE and with that memory - * type. Any page of a process can be migrated to such memory. However no one - * should be allow to pin such memory so that it can always be evicted. - * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. In support of coordinating page @@ -52,54 +45,84 @@ struct vmem_altmap { * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * + * MEMORY_DEVICE_DEVDAX: + * Host memory that has similar access semantics as System RAM i.e. DMA + * coherent and supports page pinning. In contrast to + * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax + * character device. 
+ * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer * transactions. */ enum memory_type { + /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, - MEMORY_DEVICE_PUBLIC, MEMORY_DEVICE_FS_DAX, + MEMORY_DEVICE_DEVDAX, MEMORY_DEVICE_PCI_P2PDMA, }; -/* - * Additional notes about MEMORY_DEVICE_PRIVATE may be found in - * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief - * explanation in include/linux/memory_hotplug.h. - * - * The page_free() callback is called once the page refcount reaches 1 - * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. - * This allows the device driver to implement its own memory management.) - */ -typedef void (*dev_page_free_t)(struct page *page, void *data); +struct dev_pagemap_ops { + /* + * Called once the page refcount reaches 1. (ZONE_DEVICE pages never + * reach 0 refcount unless there is a refcount bug. This allows the + * device driver to implement its own memory management.) + */ + void (*page_free)(struct page *page); + + /* + * Transition the refcount in struct dev_pagemap to the dead state. + */ + void (*kill)(struct dev_pagemap *pgmap); + + /* + * Wait for refcount in struct dev_pagemap to be idle and reap it. + */ + void (*cleanup)(struct dev_pagemap *pgmap); + + /* + * Used for private (un-addressable) device memory only. Must migrate + * the page back to a CPU accessible page. + */ + vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); +}; + +#define PGMAP_ALTMAP_VALID (1 << 0) /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings - * @page_free: free page callback when page refcount reaches 1 * @altmap: pre-allocated/reserved memory for vmemmap allocations * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping - * @kill: callback to transition @ref to the dead state - * @cleanup: callback to wait for @ref to be idle and reap it + * @internal_ref: internal reference if @ref is not provided by the caller + * @done: completion for @internal_ref * @dev: host device of the mapping for debug * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h + * @flags: PGMAP_* flags to specify defailed behavior + * @ops: method table */ struct dev_pagemap { - dev_page_free_t page_free; struct vmem_altmap altmap; - bool altmap_valid; struct resource res; struct percpu_ref *ref; - void (*kill)(struct percpu_ref *ref); - void (*cleanup)(struct percpu_ref *ref); + struct percpu_ref internal_ref; + struct completion done; struct device *dev; - void *data; enum memory_type type; + unsigned int flags; u64 pci_p2pdma_bus_offset; + const struct dev_pagemap_ops *ops; }; +static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) +{ + if (pgmap->flags & PGMAP_ALTMAP_VALID) + return &pgmap->altmap; + return NULL; +} + #ifdef CONFIG_ZONE_DEVICE void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 5ddca44be06d..77805c3f2de7 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -19,6 +19,7 @@ #define CROS_EC_DEV_PD_NAME "cros_pd" #define CROS_EC_DEV_TP_NAME "cros_tp" #define CROS_EC_DEV_ISH_NAME "cros_ish" +#define CROS_EC_DEV_SCP_NAME "cros_scp" /* * The EC is unresponsive for a time after a reboot command. 
Add a @@ -155,6 +156,7 @@ struct cros_ec_device { struct ec_response_get_next_event_v1 event_data; int event_size; u32 host_event_wake_mask; + u32 last_resume_result; }; /** diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 114614e20e4d..7ccb8757b79d 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -4,17 +4,20 @@ * * Copyright (C) 2012 Google, Inc * - * The ChromeOS EC multi function device is used to mux all the requests - * to the EC device for its multiple features: keyboard controller, - * battery charging and regulator control, firmware update. - * - * NOTE: This file is copied verbatim from the ChromeOS EC Open Source - * project in an attempt to make future updates easy to make. + * NOTE: This file is auto-generated from ChromeOS EC Open Source code from + * https://chromium.googlesource.com/chromiumos/platform/ec/+/master/include/ec_commands.h */ +/* Host communication command constants for Chrome EC */ + #ifndef __CROS_EC_COMMANDS_H #define __CROS_EC_COMMANDS_H + + + +#define BUILD_ASSERT(_cond) + /* * Current version of this protocol * @@ -25,7 +28,7 @@ #define EC_PROTO_VERSION 0x00000002 /* Command version mask */ -#define EC_VER_MASK(version) (1UL << (version)) +#define EC_VER_MASK(version) BIT(version) /* I/O addresses for ACPI commands */ #define EC_LPC_ADDR_ACPI_DATA 0x62 @@ -39,25 +42,28 @@ /* Protocol version 2 */ #define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ #define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is - * EC_PROTO2_MAX_PARAM_SIZE */ + * EC_PROTO2_MAX_PARAM_SIZE + */ /* Protocol version 3 */ #define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ #define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ -/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff - * and they tell the kernel that so we have to think of it as two parts. */ +/* + * The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff + * and they tell the kernel that so we have to think of it as two parts. 
+ */ #define EC_HOST_CMD_REGION0 0x800 #define EC_HOST_CMD_REGION1 0x880 #define EC_HOST_CMD_REGION_SIZE 0x80 /* EC command register bit functions */ -#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */ -#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */ -#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */ -#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */ -#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */ -#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */ -#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */ +#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */ +#define EC_LPC_CMDR_PENDING BIT(1) /* Write pending to EC */ +#define EC_LPC_CMDR_BUSY BIT(2) /* EC is busy processing a command */ +#define EC_LPC_CMDR_CMD BIT(3) /* Last host write was a command */ +#define EC_LPC_CMDR_ACPI_BRST BIT(4) /* Burst mode (not used) */ +#define EC_LPC_CMDR_SCI BIT(5) /* SCI event is pending */ +#define EC_LPC_CMDR_SMI BIT(6) /* SMI event is pending */ #define EC_LPC_ADDR_MEMMAP 0x900 #define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */ @@ -77,13 +83,15 @@ /* Unused 0x28 - 0x2f */ #define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ /* Unused 0x31 - 0x33 */ -#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */ -/* Reserve 0x38 - 0x3f for additional host event-related stuff */ -/* Battery values are all 32 bits */ +#define EC_MEMMAP_HOST_EVENTS 0x34 /* 64 bits */ +/* Battery values are all 32 bits, unless otherwise noted. */ #define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ #define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ #define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */ -#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */ +#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, see below (8-bit) */ +#define EC_MEMMAP_BATT_COUNT 0x4d /* Battery Count (8-bit) */ +#define EC_MEMMAP_BATT_INDEX 0x4e /* Current Battery Data Index (8-bit) */ +/* Unused 0x4f */ #define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */ #define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ #define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ @@ -97,15 +105,24 @@ /* Unused 0x84 - 0x8f */ #define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ /* Unused 0x91 */ -#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */ +#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometers data 0x92 - 0x9f */ +/* 0x92: Lid Angle if available, LID_ANGLE_UNRELIABLE otherwise */ +/* 0x94 - 0x99: 1st Accelerometer */ +/* 0x9a - 0x9f: 2nd Accelerometer */ #define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ -/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */ +/* Unused 0xa6 - 0xdf */ +/* + * ACPI is unable to access memory mapped data at or above this offset due to + * limitations of the ACPI protocol. Do not place data in the range 0xe0 - 0xfe + * which might be needed by ACPI. + */ +#define EC_MEMMAP_NO_ACPI 0xe0 /* Define the format of the accelerometer mapped memory status byte. 
*/ #define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f -#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4) -#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7) +#define EC_MEMMAP_ACC_STATUS_BUSY_BIT BIT(4) +#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT BIT(7) /* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ #define EC_TEMP_SENSOR_ENTRIES 16 @@ -149,6 +166,8 @@ #define EC_BATT_FLAG_DISCHARGING 0x04 #define EC_BATT_FLAG_CHARGING 0x08 #define EC_BATT_FLAG_LEVEL_CRITICAL 0x10 +/* Set if some of the static/dynamic data is invalid (or outdated). */ +#define EC_BATT_FLAG_INVALID_DATA 0x20 /* Switch flags at EC_MEMMAP_SWITCHES */ #define EC_SWITCH_LID_OPEN 0x01 @@ -174,20 +193,242 @@ #define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ #define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ +/*****************************************************************************/ +/* + * ACPI commands + * + * These are valid ONLY on the ACPI command/data port. + */ + +/* + * ACPI Read Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_DATA bit to set + * - Read value from EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_READ 0x0080 + +/* + * ACPI Write Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write value to EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_WRITE 0x0081 + +/* + * ACPI Burst Enable Embedded Controller + * + * This enables burst mode on the EC to allow the host to issue several + * commands back-to-back. While in this mode, writes to mapped multi-byte + * data are locked out to ensure data consistency. + */ +#define EC_CMD_ACPI_BURST_ENABLE 0x0082 + +/* + * ACPI Burst Disable Embedded Controller + * + * This disables burst mode on the EC and stops preventing EC writes to mapped + * multi-byte data. + */ +#define EC_CMD_ACPI_BURST_DISABLE 0x0083 + +/* + * ACPI Query Embedded Controller + * + * This clears the lowest-order bit in the currently pending host events, and + * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, + * event 0x80000000 = 32), or 0 if no event was pending. + */ +#define EC_CMD_ACPI_QUERY_EVENT 0x0084 + +/* Valid addresses in ACPI memory space, for read/write commands */ + +/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ +#define EC_ACPI_MEM_VERSION 0x00 +/* + * Test location; writing value here updates test compliment byte to (0xff - + * value). + */ +#define EC_ACPI_MEM_TEST 0x01 +/* Test compliment; writes here are ignored. */ +#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 + +/* Keyboard backlight brightness percent (0 - 100) */ +#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 +/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ +#define EC_ACPI_MEM_FAN_DUTY 0x04 + +/* + * DPTF temp thresholds. Any of the EC's temp sensors can have up to two + * independent thresholds attached to them. The current value of the ID + * register determines which sensor is affected by the THRESHOLD and COMMIT + * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme + * as the memory-mapped sensors. 
The COMMIT register applies those settings. + * + * The spec does not mandate any way to read back the threshold settings + * themselves, but when a threshold is crossed the AP needs a way to determine + * which sensor(s) are responsible. Each reading of the ID register clears and + * returns one sensor ID that has crossed one of its threshold (in either + * direction) since the last read. A value of 0xFF means "no new thresholds + * have tripped". Setting or enabling the thresholds for a sensor will clear + * the unread event count for that sensor. + */ +#define EC_ACPI_MEM_TEMP_ID 0x05 +#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 +#define EC_ACPI_MEM_TEMP_COMMIT 0x07 +/* + * Here are the bits for the COMMIT register: + * bit 0 selects the threshold index for the chosen sensor (0/1) + * bit 1 enables/disables the selected threshold (0 = off, 1 = on) + * Each write to the commit register affects one threshold. + */ +#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK BIT(0) +#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK BIT(1) +/* + * Example: + * + * Set the thresholds for sensor 2 to 50 C and 60 C: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET + * write 0x2 to [0x07] -- enable threshold 0 with this value + * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET + * write 0x3 to [0x07] -- enable threshold 1 with this value + * + * Disable the 60 C threshold, leaving the 50 C threshold unchanged: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x1 to [0x07] -- disable threshold 1 + */ + +/* DPTF battery charging current limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 + +/* Charging limit is specified in 64 mA steps */ +#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 +/* Value to disable DPTF battery charging limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff + +/* + * Report device orientation + * Bits Definition + * 3:1 Device DPTF Profile Number (DDPN) + * 0 = Reserved for backward compatibility (indicates no valid + * profile number. Host should fall back to using TBMD). + * 1..7 = DPTF Profile number to indicate to host which table needs + * to be loaded. + * 0 Tablet Mode Device Indicator (TBMD) + */ +#define EC_ACPI_MEM_DEVICE_ORIENTATION 0x09 +#define EC_ACPI_MEM_TBMD_SHIFT 0 +#define EC_ACPI_MEM_TBMD_MASK 0x1 +#define EC_ACPI_MEM_DDPN_SHIFT 1 +#define EC_ACPI_MEM_DDPN_MASK 0x7 + +/* + * Report device features. Uses the same format as the host command, except: + * + * bit 0 (EC_FEATURE_LIMITED) changes meaning from "EC code has a limited set + * of features", which is of limited interest when the system is already + * interpreting ACPI bytecode, to "EC_FEATURES[0-7] is not supported". Since + * these are supported, it defaults to 0. + * This allows detecting the presence of this field since older versions of + * the EC codebase would simply return 0xff to that unknown address. Check + * FEATURES0 != 0xff (or FEATURES0[0] == 0) to make sure that the other bits + * are valid. + */ +#define EC_ACPI_MEM_DEVICE_FEATURES0 0x0a +#define EC_ACPI_MEM_DEVICE_FEATURES1 0x0b +#define EC_ACPI_MEM_DEVICE_FEATURES2 0x0c +#define EC_ACPI_MEM_DEVICE_FEATURES3 0x0d +#define EC_ACPI_MEM_DEVICE_FEATURES4 0x0e +#define EC_ACPI_MEM_DEVICE_FEATURES5 0x0f +#define EC_ACPI_MEM_DEVICE_FEATURES6 0x10 +#define EC_ACPI_MEM_DEVICE_FEATURES7 0x11 + +#define EC_ACPI_MEM_BATTERY_INDEX 0x12 + +/* + * USB Port Power. Each bit indicates whether the corresponding USB ports' power + * is enabled (1) or disabled (0). + * bit 0 USB port ID 0 + * ... 
+ * bit 7 USB port ID 7 + */ +#define EC_ACPI_MEM_USB_PORT_POWER 0x13 + +/* + * ACPI addresses 0x20 - 0xff map to EC_MEMMAP offset 0x00 - 0xdf. This data + * is read-only from the AP. Added in EC_ACPI_MEM_VERSION 2. + */ +#define EC_ACPI_MEM_MAPPED_BEGIN 0x20 +#define EC_ACPI_MEM_MAPPED_SIZE 0xe0 + +/* Current version of ACPI memory address space */ +#define EC_ACPI_MEM_VERSION_CURRENT 2 + + /* * This header file is used in coreboot both in C and ACPI code. The ACPI code * is pre-processed to handle constants but the ASL compiler is unable to * handle actual C code so keep it separate. */ -#ifndef __ACPI__ + /* - * Define __packed if someone hasn't beat us to it. Linux kernel style - * checking prefers __packed over __attribute__((packed)). + * Attributes for EC request and response packets. Just defining __packed + * results in inefficient assembly code on ARM, if the structure is actually + * 32-bit aligned, as it should be for all buffers. + * + * Be very careful when adding these to existing structures. They will round + * up the structure size to the specified boundary. + * + * Also be very careful to make that if a structure is included in some other + * parent structure that the alignment will still be true given the packing of + * the parent structure. This is particularly important if the sub-structure + * will be passed as a pointer to another function, since that function will + * not know about the misaligment caused by the parent structure's packing. + * + * Also be very careful using __packed - particularly when nesting non-packed + * structures inside packed ones. In fact, DO NOT use __packed directly; + * always use one of these attributes. + * + * Once everything is annotated properly, the following search strings should + * not return ANY matches in this file other than right here: + * + * "__packed" - generates inefficient code; all sub-structs must also be packed + * + * "struct [^_]" - all structs should be annotated, except for structs that are + * members of other structs/unions (and their original declarations should be + * annotated). + */ + +/* + * Packed structures make no assumption about alignment, so they do inefficient + * byte-wise reads. */ -#ifndef __packed -#define __packed __attribute__((packed)) -#endif +#define __ec_align1 __packed +#define __ec_align2 __packed +#define __ec_align4 __packed +#define __ec_align_size1 __packed +#define __ec_align_offset1 __packed +#define __ec_align_offset2 __packed +#define __ec_todo_packed __packed +#define __ec_todo_unpacked + /* LPC command status byte masks */ /* EC has written a byte in the data register and host hasn't read it yet */ @@ -198,7 +439,7 @@ #define EC_LPC_STATUS_PROCESSING 0x04 /* Last write to EC was a command, not data */ #define EC_LPC_STATUS_LAST_CMD 0x08 -/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */ +/* EC is in burst mode */ #define EC_LPC_STATUS_BURST_MODE 0x10 /* SCI event is pending (requesting SCI query) */ #define EC_LPC_STATUS_SCI_PENDING 0x20 @@ -214,7 +455,10 @@ #define EC_LPC_STATUS_BUSY_MASK \ (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING) -/* Host command response codes */ +/* + * Host command response codes (16-bit). Note that response codes should be + * stored in a uint16_t rather than directly in a value of this type. 
+ */ enum ec_status { EC_RES_SUCCESS = 0, EC_RES_INVALID_COMMAND = 1, @@ -230,7 +474,13 @@ enum ec_status { EC_RES_OVERFLOW = 11, /* Table / data overflow */ EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ - EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */ + EC_RES_RESPONSE_TOO_BIG = 14, /* Response was too big to handle */ + EC_RES_BUS_ERROR = 15, /* Communications bus error */ + EC_RES_BUSY = 16, /* Up but too busy. Should retry */ + EC_RES_INVALID_HEADER_VERSION = 17, /* Header version invalid */ + EC_RES_INVALID_HEADER_CRC = 18, /* Header CRC invalid */ + EC_RES_INVALID_DATA_CRC = 19, /* Data CRC invalid */ + EC_RES_DUP_UNAVAILABLE = 20, /* Can't resend response */ }; /* @@ -250,7 +500,8 @@ enum host_event_code { EC_HOST_EVENT_BATTERY_CRITICAL = 7, EC_HOST_EVENT_BATTERY = 8, EC_HOST_EVENT_THERMAL_THRESHOLD = 9, - EC_HOST_EVENT_THERMAL_OVERLOAD = 10, + /* Event generated by a device attached to the EC */ + EC_HOST_EVENT_DEVICE = 10, EC_HOST_EVENT_THERMAL = 11, EC_HOST_EVENT_USB_CHARGER = 12, EC_HOST_EVENT_KEY_PRESSED = 13, @@ -277,15 +528,34 @@ enum host_event_code { EC_HOST_EVENT_HANG_DETECT = 20, /* Hang detect logic detected a hang and warm rebooted the AP */ EC_HOST_EVENT_HANG_REBOOT = 21, + /* PD MCU triggering host event */ EC_HOST_EVENT_PD_MCU = 22, - /* EC desires to change state of host-controlled USB mux */ - EC_HOST_EVENT_USB_MUX = 28, + /* Battery Status flags have changed */ + EC_HOST_EVENT_BATTERY_STATUS = 23, + + /* EC encountered a panic, triggering a reset */ + EC_HOST_EVENT_PANIC = 24, + + /* Keyboard fastboot combo has been pressed */ + EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25, /* EC RTC event occurred */ EC_HOST_EVENT_RTC = 26, + /* Emulate MKBP event */ + EC_HOST_EVENT_MKBP = 27, + + /* EC desires to change state of host-controlled USB mux */ + EC_HOST_EVENT_USB_MUX = 28, + + /* TABLET/LAPTOP mode or detachable base attach/detach event */ + EC_HOST_EVENT_MODE_CHANGE = 29, + + /* Keyboard recovery combo with hardware reinitialization */ + EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30, + /* * The high bit of the event mask is not used as a host event code. If * it reads back as set, then the entire event mask should be @@ -296,7 +566,7 @@ enum host_event_code { EC_HOST_EVENT_INVALID = 32 }; /* Host event mask */ -#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) +#define EC_HOST_EVENT_MASK(event_code) BIT_ULL((event_code) - 1) /** * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS @@ -311,7 +581,7 @@ struct ec_lpc_host_args { uint8_t command_version; uint8_t data_size; uint8_t checksum; -} __packed; +} __ec_align4; /* Flags for ec_lpc_host_args.flags */ /* @@ -321,7 +591,7 @@ struct ec_lpc_host_args { * If EC gets a command and this flag is not set, this is an old-style command. * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with * unknown length. EC must respond with an old-style response (that is, - * withouth setting EC_HOST_ARGS_FLAG_TO_HOST). + * without setting EC_HOST_ARGS_FLAG_TO_HOST). 
*/ #define EC_HOST_ARGS_FLAG_FROM_HOST 0x01 /* @@ -482,7 +752,7 @@ struct ec_host_request { uint8_t command_version; uint8_t reserved; uint16_t data_len; -} __packed; +} __ec_align4; #define EC_HOST_RESPONSE_VERSION 3 @@ -501,18 +771,151 @@ struct ec_host_response { uint16_t result; uint16_t data_len; uint16_t reserved; -} __packed; +} __ec_align4; + +/*****************************************************************************/ + +/* + * Host command protocol V4. + * + * Packets always start with a request or response header. They are followed + * by data_len bytes of data. If the data_crc_present flag is set, the data + * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1 + * polynomial. + * + * Host algorithm when sending a request q: + * + * 101) tries_left=(some value, e.g. 3); + * 102) q.seq_num++ + * 103) q.seq_dup=0 + * 104) Calculate q.header_crc. + * 105) Send request q to EC. + * 106) Wait for response r. Go to 201 if received or 301 if timeout. + * + * 201) If r.struct_version != 4, go to 301. + * 202) If r.header_crc mismatches calculated CRC for r header, go to 301. + * 203) If r.data_crc_present and r.data_crc mismatches, go to 301. + * 204) If r.seq_num != q.seq_num, go to 301. + * 205) If r.seq_dup == q.seq_dup, return success. + * 207) If r.seq_dup == 1, go to 301. + * 208) Return error. + * + * 301) If --tries_left <= 0, return error. + * 302) If q.seq_dup == 1, go to 105. + * 303) q.seq_dup = 1 + * 304) Go to 104. + * + * EC algorithm when receiving a request q. + * EC has response buffer r, error buffer e. + * + * 101) If q.struct_version != 4, set e.result = EC_RES_INVALID_HEADER_VERSION + * and go to 301 + * 102) If q.header_crc mismatches calculated CRC, set e.result = + * EC_RES_INVALID_HEADER_CRC and go to 301 + * 103) If q.data_crc_present, calculate data CRC. If that mismatches the CRC + * byte at the end of the packet, set e.result = EC_RES_INVALID_DATA_CRC + * and go to 301. + * 104) If q.seq_dup == 0, go to 201. + * 105) If q.seq_num != r.seq_num, go to 201. + * 106) If q.seq_dup == r.seq_dup, go to 205, else go to 203. + * + * 201) Process request q into response r. + * 202) r.seq_num = q.seq_num + * 203) r.seq_dup = q.seq_dup + * 204) Calculate r.header_crc + * 205) If r.data_len > 0 and data is no longer available, set e.result = + * EC_RES_DUP_UNAVAILABLE and go to 301. + * 206) Send response r. + * + * 301) e.seq_num = q.seq_num + * 302) e.seq_dup = q.seq_dup + * 303) Calculate e.header_crc. + * 304) Send error response e. 
+ */ + +/* Version 4 request from host */ +struct ec_host_request4 { + /* + * bits 0-3: struct_version: Structure version (=4) + * bit 4: is_response: Is response (=0) + * bits 5-6: seq_num: Sequence number + * bit 7: seq_dup: Sequence duplicate flag + */ + uint8_t fields0; + + /* + * bits 0-4: command_version: Command version + * bits 5-6: Reserved (set 0, ignore on read) + * bit 7: data_crc_present: Is data CRC present after data + */ + uint8_t fields1; + + /* Command code (EC_CMD_*) */ + uint16_t command; + + /* Length of data which follows this header (not including data CRC) */ + uint16_t data_len; + + /* Reserved (set 0, ignore on read) */ + uint8_t reserved; + + /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ + uint8_t header_crc; +} __ec_align4; + +/* Version 4 response from EC */ +struct ec_host_response4 { + /* + * bits 0-3: struct_version: Structure version (=4) + * bit 4: is_response: Is response (=1) + * bits 5-6: seq_num: Sequence number + * bit 7: seq_dup: Sequence duplicate flag + */ + uint8_t fields0; + + /* + * bits 0-6: Reserved (set 0, ignore on read) + * bit 7: data_crc_present: Is data CRC present after data + */ + uint8_t fields1; + + /* Result code (EC_RES_*) */ + uint16_t result; + + /* Length of data which follows this header (not including data CRC) */ + uint16_t data_len; + + /* Reserved (set 0, ignore on read) */ + uint8_t reserved; + + /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ + uint8_t header_crc; +} __ec_align4; + +/* Fields in fields0 byte */ +#define EC_PACKET4_0_STRUCT_VERSION_MASK 0x0f +#define EC_PACKET4_0_IS_RESPONSE_MASK 0x10 +#define EC_PACKET4_0_SEQ_NUM_SHIFT 5 +#define EC_PACKET4_0_SEQ_NUM_MASK 0x60 +#define EC_PACKET4_0_SEQ_DUP_MASK 0x80 + +/* Fields in fields1 byte */ +#define EC_PACKET4_1_COMMAND_VERSION_MASK 0x1f /* (request only) */ +#define EC_PACKET4_1_DATA_CRC_PRESENT_MASK 0x80 /*****************************************************************************/ /* * Notes on commands: * * Each command is an 16-bit command value. Commands which take params or - * return response data specify structs for that data. If no struct is + * return response data specify structures for that data. If no structure is * specified, the command does not input or output data, respectively. * Parameter/response length is implicit in the structs. Some underlying * communication protocols (I2C, SPI) may add length or checksum headers, but * those are implementation-dependent and not defined here. + * + * All commands MUST be #defined to be 4-digit UPPER CASE hex values + * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. */ /*****************************************************************************/ @@ -522,7 +925,7 @@ struct ec_host_response { * Get protocol version, used to deal with non-backward compatible protocol * changes. */ -#define EC_CMD_PROTO_VERSION 0x00 +#define EC_CMD_PROTO_VERSION 0x0000 /** * struct ec_response_proto_version - Response to the proto version command. @@ -530,13 +933,13 @@ struct ec_host_response { */ struct ec_response_proto_version { uint32_t version; -} __packed; +} __ec_align4; /* * Hello. This is a simple command to test the EC is responsive to * commands. */ -#define EC_CMD_HELLO 0x01 +#define EC_CMD_HELLO 0x0001 /** * struct ec_params_hello - Parameters to the hello command. @@ -544,7 +947,7 @@ struct ec_response_proto_version { */ struct ec_params_hello { uint32_t in_data; -} __packed; +} __ec_align4; /** * struct ec_response_hello - Response to the hello command. 
@@ -552,10 +955,10 @@ struct ec_params_hello { */ struct ec_response_hello { uint32_t out_data; -} __packed; +} __ec_align4; /* Get version number */ -#define EC_CMD_GET_VERSION 0x02 +#define EC_CMD_GET_VERSION 0x0002 enum ec_current_image { EC_IMAGE_UNKNOWN = 0, @@ -575,10 +978,10 @@ struct ec_response_get_version { char version_string_rw[32]; char reserved[32]; uint32_t current_image; -} __packed; +} __ec_align4; /* Read test */ -#define EC_CMD_READ_TEST 0x03 +#define EC_CMD_READ_TEST 0x0003 /** * struct ec_params_read_test - Parameters for the read test command. @@ -588,7 +991,7 @@ struct ec_response_get_version { struct ec_params_read_test { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /** * struct ec_response_read_test - Response to the read test command. @@ -596,17 +999,17 @@ struct ec_params_read_test { */ struct ec_response_read_test { uint32_t data[32]; -} __packed; +} __ec_align4; /* * Get build information * * Response is null-terminated string. */ -#define EC_CMD_GET_BUILD_INFO 0x04 +#define EC_CMD_GET_BUILD_INFO 0x0004 /* Get chip info */ -#define EC_CMD_GET_CHIP_INFO 0x05 +#define EC_CMD_GET_CHIP_INFO 0x0005 /** * struct ec_response_get_chip_info - Response to the get chip info command. @@ -618,10 +1021,10 @@ struct ec_response_get_chip_info { char vendor[32]; char name[32]; char revision[32]; -} __packed; +} __ec_align4; /* Get board HW version */ -#define EC_CMD_GET_BOARD_VERSION 0x06 +#define EC_CMD_GET_BOARD_VERSION 0x0006 /** * struct ec_response_board_version - Response to the board version command. @@ -629,7 +1032,7 @@ struct ec_response_get_chip_info { */ struct ec_response_board_version { uint16_t board_version; -} __packed; +} __ec_align2; /* * Read memory-mapped data. @@ -639,7 +1042,7 @@ struct ec_response_board_version { * * Response is params.size bytes of data. */ -#define EC_CMD_READ_MEMMAP 0x07 +#define EC_CMD_READ_MEMMAP 0x0007 /** * struct ec_params_read_memmap - Parameters for the read memory map command. @@ -649,10 +1052,10 @@ struct ec_response_board_version { struct ec_params_read_memmap { uint8_t offset; uint8_t size; -} __packed; +} __ec_align1; /* Read versions supported for a command */ -#define EC_CMD_GET_CMD_VERSIONS 0x08 +#define EC_CMD_GET_CMD_VERSIONS 0x0008 /** * struct ec_params_get_cmd_versions - Parameters for the get command versions. @@ -660,7 +1063,7 @@ struct ec_params_read_memmap { */ struct ec_params_get_cmd_versions { uint8_t cmd; -} __packed; +} __ec_align1; /** * struct ec_params_get_cmd_versions_v1 - Parameters for the get command @@ -669,7 +1072,7 @@ struct ec_params_get_cmd_versions { */ struct ec_params_get_cmd_versions_v1 { uint16_t cmd; -} __packed; +} __ec_align2; /** * struct ec_response_get_cmd_version - Response to the get command versions. @@ -678,20 +1081,20 @@ struct ec_params_get_cmd_versions_v1 { */ struct ec_response_get_cmd_versions { uint32_t version_mask; -} __packed; +} __ec_align4; /* - * Check EC communcations status (busy). This is needed on i2c/spi but not + * Check EC communications status (busy). This is needed on i2c/spi but not * on lpc since it has its own out-of-band busy indicator. * * lpc must read the status from the command register. Attempting this on * lpc will overwrite the args/parameter space and corrupt its data. 
*/ -#define EC_CMD_GET_COMMS_STATUS 0x09 +#define EC_CMD_GET_COMMS_STATUS 0x0009 /* Avoid using ec_status which is for return values */ enum ec_comms_status { - EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ + EC_COMMS_STATUS_PROCESSING = BIT(0), /* Processing cmd */ }; /** @@ -701,29 +1104,29 @@ enum ec_comms_status { */ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ -} __packed; +} __ec_align4; /* Fake a variety of responses, purely for testing purposes. */ -#define EC_CMD_TEST_PROTOCOL 0x0a +#define EC_CMD_TEST_PROTOCOL 0x000A /* Tell the EC what to send back to us. */ struct ec_params_test_protocol { uint32_t ec_result; uint32_t ret_len; uint8_t buf[32]; -} __packed; +} __ec_align4; /* Here it comes... */ struct ec_response_test_protocol { uint8_t buf[32]; -} __packed; +} __ec_align4; -/* Get prococol information */ -#define EC_CMD_GET_PROTOCOL_INFO 0x0b +/* Get protocol information */ +#define EC_CMD_GET_PROTOCOL_INFO 0x000B /* Flags for ec_response_get_protocol_info.flags */ /* EC_RES_IN_PROGRESS may be returned if a command is slow */ -#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) +#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED BIT(0) /** * struct ec_response_get_protocol_info - Response to the get protocol info. @@ -739,7 +1142,7 @@ struct ec_response_get_protocol_info { uint16_t max_request_packet_size; uint16_t max_response_packet_size; uint32_t flags; -} __packed; +} __ec_align4; /*****************************************************************************/ @@ -757,19 +1160,19 @@ struct ec_response_get_protocol_info { struct ec_params_get_set_value { uint32_t flags; uint32_t value; -} __packed; +} __ec_align4; struct ec_response_get_set_value { uint32_t flags; uint32_t value; -} __packed; +} __ec_align4; -/* More than one command can use these structs to get/set paramters. */ -#define EC_CMD_GSV_PAUSE_IN_S5 0x0c +/* More than one command can use these structs to get/set parameters. */ +#define EC_CMD_GSV_PAUSE_IN_S5 0x000C /*****************************************************************************/ /* List the features supported by the firmware */ -#define EC_CMD_GET_FEATURES 0x0d +#define EC_CMD_GET_FEATURES 0x000D /* Supported features */ enum ec_feature_code { @@ -876,24 +1279,36 @@ enum ec_feature_code { EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37, /* EC supports audio codec. */ EC_FEATURE_AUDIO_CODEC = 38, - /* EC Supports SCP. */ + /* The MCU is a System Companion Processor (SCP). */ EC_FEATURE_SCP = 39, /* The MCU is an Integrated Sensor Hub */ EC_FEATURE_ISH = 40, }; -#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) -#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) +#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) +#define EC_FEATURE_MASK_1(event_code) BIT(event_code - 32) struct ec_response_get_features { uint32_t flags[2]; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* Get the board's SKU ID from EC */ +#define EC_CMD_GET_SKU_ID 0x000E + +/* Set SKU ID from AP */ +#define EC_CMD_SET_SKU_ID 0x000F + +struct ec_sku_id_info { + uint32_t sku_id; +} __ec_align4; /*****************************************************************************/ /* Flash commands */ /* Get flash info */ -#define EC_CMD_FLASH_INFO 0x10 +#define EC_CMD_FLASH_INFO 0x0010 +#define EC_VER_FLASH_INFO 2 /** * struct ec_response_flash_info - Response to the flash info command. 
@@ -912,11 +1327,22 @@ struct ec_response_flash_info { uint32_t write_block_size; uint32_t erase_block_size; uint32_t protect_block_size; -} __packed; +} __ec_align4; -/* Flags for version 1+ flash info command */ -/* EC flash erases bits to 0 instead of 1 */ -#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) +/* + * Flags for version 1+ flash info command + * EC flash erases bits to 0 instead of 1. + */ +#define EC_FLASH_INFO_ERASE_TO_0 BIT(0) + +/* + * Flash must be selected for read/write/erase operations to succeed. This may + * be necessary on a chip where write/erase can be corrupted by other board + * activity, or where the chip needs to enable some sort of programming voltage, + * or where the read/write/erase operations require cleanly suspending other + * chip functionality. + */ +#define EC_FLASH_INFO_SELECT_REQUIRED BIT(1) /** * struct ec_response_flash_info_1 - Response to the flash info v1 command. @@ -938,7 +1364,14 @@ struct ec_response_flash_info { * fields following. * * gcc anonymous structs don't seem to get along with the __packed directive; - * if they did we'd define the version 0 struct as a sub-struct of this one. + * if they did we'd define the version 0 structure as a sub-structure of this + * one. + * + * Version 2 supports flash banks of different sizes: + * The caller specified the number of banks it has preallocated + * (num_banks_desc) + * The EC returns the number of banks describing the flash memory. + * It adds banks descriptions up to num_banks_desc. */ struct ec_response_flash_info_1 { /* Version 0 fields; see above for description */ @@ -950,14 +1383,50 @@ struct ec_response_flash_info_1 { /* Version 1 adds these fields: */ uint32_t write_ideal_size; uint32_t flags; -} __packed; +} __ec_align4; + +struct ec_params_flash_info_2 { + /* Number of banks to describe */ + uint16_t num_banks_desc; + /* Reserved; set 0; ignore on read */ + uint8_t reserved[2]; +} __ec_align4; + +struct ec_flash_bank { + /* Number of sector is in this bank. */ + uint16_t count; + /* Size in power of 2 of each sector (8 --> 256 bytes) */ + uint8_t size_exp; + /* Minimal write size for the sectors in this bank */ + uint8_t write_size_exp; + /* Erase size for the sectors in this bank */ + uint8_t erase_size_exp; + /* Size for write protection, usually identical to erase size. */ + uint8_t protect_size_exp; + /* Reserved; set 0; ignore on read */ + uint8_t reserved[2]; +}; + +struct ec_response_flash_info_2 { + /* Total flash in the EC. */ + uint32_t flash_size; + /* Flags; see EC_FLASH_INFO_* */ + uint32_t flags; + /* Maximum size to use to send data to write to the EC. */ + uint32_t write_ideal_size; + /* Number of banks present in the EC. */ + uint16_t num_banks_total; + /* Number of banks described in banks array. */ + uint16_t num_banks_desc; + struct ec_flash_bank banks[0]; +} __ec_align4; /* * Read flash * * Response is params.size bytes of data. */ -#define EC_CMD_FLASH_READ 0x11 +#define EC_CMD_FLASH_READ 0x0011 /** * struct ec_params_flash_read - Parameters for the flash read command. 
@@ -967,10 +1436,10 @@ struct ec_response_flash_info_1 { struct ec_params_flash_read { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /* Write flash */ -#define EC_CMD_FLASH_WRITE 0x12 +#define EC_CMD_FLASH_WRITE 0x0012 #define EC_VER_FLASH_WRITE 1 /* Version 0 of the flash command supported only 64 bytes of data */ @@ -985,20 +1454,57 @@ struct ec_params_flash_write { uint32_t offset; uint32_t size; /* Followed by data to write */ -} __packed; +} __ec_align4; /* Erase flash */ -#define EC_CMD_FLASH_ERASE 0x13 +#define EC_CMD_FLASH_ERASE 0x0013 /** - * struct ec_params_flash_erase - Parameters for the flash erase command. + * struct ec_params_flash_erase - Parameters for the flash erase command, v0. * @offset: Byte offset to erase. * @size: Size to erase in bytes. */ struct ec_params_flash_erase { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; + +/* + * v1 adds async erase: + * subcommands can return: + * EC_RES_SUCCESS : erased (see ERASE_SECTOR_ASYNC case below). + * EC_RES_INVALID_PARAM : offset/size are not aligned on an erase boundary. + * EC_RES_ERROR : other errors. + * EC_RES_BUSY : an existing erase operation is in progress. + * EC_RES_ACCESS_DENIED: Trying to erase running image. + * + * When ERASE_SECTOR_ASYNC returns EC_RES_SUCCESS, the operation is just + * properly queued. The user must call the ERASE_GET_RESULT subcommand to get + * the proper result. + * When ERASE_GET_RESULT returns EC_RES_BUSY, the caller must wait and send + * ERASE_GET_RESULT again to get the result of ERASE_SECTOR_ASYNC. + * ERASE_GET_RESULT command may time out on an EC where flash access is not + * permitted while erasing. (For instance, STM32F4). + */ enum ec_flash_erase_cmd { FLASH_ERASE_SECTOR, /* Erase and wait for result */ FLASH_ERASE_SECTOR_ASYNC, /* Erase and return immediately. */ FLASH_ERASE_GET_RESULT, /* Ask for last erase result */ }; + +/** + * struct ec_params_flash_erase_v1 - Parameters for the flash erase command, v1. + * @cmd: One of ec_flash_erase_cmd. + * @reserved: Pad byte; currently always contains 0. + * @flag: No flags defined yet; set to 0. + * @params: Same as v0 parameters. + */ +struct ec_params_flash_erase_v1 { + uint8_t cmd; + uint8_t reserved; + uint16_t flag; + struct ec_params_flash_erase params; +} __ec_align4; /* * Get/set flash protection. @@ -1010,31 +1516,40 @@ struct ec_params_flash_erase { * * If mask=0, simply returns the current flags state. */ -#define EC_CMD_FLASH_PROTECT 0x15 +#define EC_CMD_FLASH_PROTECT 0x0015 #define EC_VER_FLASH_PROTECT 1 /* Command version 1 */ /* Flags for flash protection */ /* RO flash code protected when the EC boots */ -#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0) +#define EC_FLASH_PROTECT_RO_AT_BOOT BIT(0) /* * RO flash code protected now. If this bit is set, at-boot status cannot * be changed. */ -#define EC_FLASH_PROTECT_RO_NOW (1 << 1) +#define EC_FLASH_PROTECT_RO_NOW BIT(1) /* Entire flash code protected now, until reboot. */ -#define EC_FLASH_PROTECT_ALL_NOW (1 << 2) +#define EC_FLASH_PROTECT_ALL_NOW BIT(2) /* Flash write protect GPIO is asserted now */ -#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3) +#define EC_FLASH_PROTECT_GPIO_ASSERTED BIT(3) /* Error - at least one bank of flash is stuck locked, and cannot be unlocked */ -#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4) +#define EC_FLASH_PROTECT_ERROR_STUCK BIT(4) /* * Error - flash protection is in inconsistent state. At least one bank of * flash which should be protected is not protected.
Usually fixed by * re-requesting the desired flags, or by a hard reset if that fails. */ -#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5) -/* Entile flash code protected when the EC boots */ -#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) +#define EC_FLASH_PROTECT_ERROR_INCONSISTENT BIT(5) +/* Entire flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_ALL_AT_BOOT BIT(6) +/* RW flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_RW_AT_BOOT BIT(7) +/* RW flash code protected now. */ +#define EC_FLASH_PROTECT_RW_NOW BIT(8) +/* Rollback information flash region protected when the EC boots */ +#define EC_FLASH_PROTECT_ROLLBACK_AT_BOOT BIT(9) +/* Rollback information flash region protected now */ +#define EC_FLASH_PROTECT_ROLLBACK_NOW BIT(10) + /** * struct ec_params_flash_protect - Parameters for the flash protect command. @@ -1044,7 +1559,7 @@ struct ec_params_flash_erase { struct ec_params_flash_protect { uint32_t mask; uint32_t flags; -} __packed; +} __ec_align4; /** * struct ec_response_flash_protect - Response to the flash protect command. @@ -1059,7 +1574,7 @@ struct ec_response_flash_protect { uint32_t flags; uint32_t valid_flags; uint32_t writable_flags; -} __packed; +} __ec_align4; /* * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash @@ -1067,22 +1582,37 @@ struct ec_response_flash_protect { */ /* Get the region offset/size */ -#define EC_CMD_FLASH_REGION_INFO 0x16 +#define EC_CMD_FLASH_REGION_INFO 0x0016 #define EC_VER_FLASH_REGION_INFO 1 enum ec_flash_region { /* Region which holds read-only EC image */ EC_FLASH_REGION_RO = 0, - /* Region which holds rewritable EC image */ - EC_FLASH_REGION_RW, + /* + * Region which holds active RW image. 'Active' is different from + * 'running'. Active means 'scheduled-to-run'. Since RO image always + * scheduled to run, active/non-active applies only to RW images (for + * the same reason 'update' applies only to RW images. It's a state of + * an image on a flash. Running image can be RO, RW_A, RW_B but active + * image can only be RW_A or RW_B. In recovery mode, an active RW image + * doesn't enter 'running' state but it's still active on a flash. + */ + EC_FLASH_REGION_ACTIVE, /* * Region which should be write-protected in the factory (a superset of * EC_FLASH_REGION_RO) */ EC_FLASH_REGION_WP_RO, + /* Region which holds updatable (non-active) RW image */ + EC_FLASH_REGION_UPDATE, /* Number of regions */ EC_FLASH_REGION_COUNT, }; +/* + * 'RW' is vague if there are multiple RW images; we mean the active one, + * so the old constant is deprecated. 
+ */ +#define EC_FLASH_REGION_RW EC_FLASH_REGION_ACTIVE /** * struct ec_params_flash_region_info - Parameters for the flash region info @@ -1091,15 +1621,15 @@ enum ec_flash_region { */ struct ec_params_flash_region_info { uint32_t region; -} __packed; +} __ec_align4; struct ec_response_flash_region_info { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /* Read/write VbNvContext */ -#define EC_CMD_VBNV_CONTEXT 0x17 +#define EC_CMD_VBNV_CONTEXT 0x0017 #define EC_VER_VBNV_CONTEXT 1 #define EC_VBNV_BLOCK_SIZE 16 @@ -1111,52 +1641,99 @@ enum ec_vbnvcontext_op { struct ec_params_vbnvcontext { uint32_t op; uint8_t block[EC_VBNV_BLOCK_SIZE]; -} __packed; +} __ec_align4; struct ec_response_vbnvcontext { uint8_t block[EC_VBNV_BLOCK_SIZE]; -} __packed; +} __ec_align4; + + +/* Get SPI flash information */ +#define EC_CMD_FLASH_SPI_INFO 0x0018 + +struct ec_response_flash_spi_info { + /* JEDEC info from command 0x9F (manufacturer, memory type, size) */ + uint8_t jedec[3]; + + /* Pad byte; currently always contains 0 */ + uint8_t reserved0; + + /* Manufacturer / device ID from command 0x90 */ + uint8_t mfr_dev_id[2]; + + /* Status registers from command 0x05 and 0x35 */ + uint8_t sr1, sr2; +} __ec_align1; + + +/* Select flash during flash operations */ +#define EC_CMD_FLASH_SELECT 0x0019 + +/** + * struct ec_params_flash_select - Parameters for the flash select command. + * @select: 1 to select flash, 0 to deselect flash + */ +struct ec_params_flash_select { + uint8_t select; +} __ec_align4; + /*****************************************************************************/ /* PWM commands */ /* Get fan target RPM */ -#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20 +#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x0020 struct ec_response_pwm_get_fan_rpm { uint32_t rpm; -} __packed; +} __ec_align4; /* Set target fan RPM */ -#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21 +#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x0021 + +/* Version 0 of input params */ +struct ec_params_pwm_set_fan_target_rpm_v0 { + uint32_t rpm; +} __ec_align4; -struct ec_params_pwm_set_fan_target_rpm { +/* Version 1 of input params */ +struct ec_params_pwm_set_fan_target_rpm_v1 { uint32_t rpm; -} __packed; + uint8_t fan_idx; +} __ec_align_size1; /* Get keyboard backlight */ -#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22 +/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ +#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x0022 struct ec_response_pwm_get_keyboard_backlight { uint8_t percent; uint8_t enabled; -} __packed; +} __ec_align1; /* Set keyboard backlight */ -#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23 +/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ +#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x0023 struct ec_params_pwm_set_keyboard_backlight { uint8_t percent; -} __packed; +} __ec_align1; /* Set target fan PWM duty cycle */ -#define EC_CMD_PWM_SET_FAN_DUTY 0x24 +#define EC_CMD_PWM_SET_FAN_DUTY 0x0024 + +/* Version 0 of input params */ +struct ec_params_pwm_set_fan_duty_v0 { + uint32_t percent; +} __ec_align4; -struct ec_params_pwm_set_fan_duty { +/* Version 1 of input params */ +struct ec_params_pwm_set_fan_duty_v1 { uint32_t percent; -} __packed; + uint8_t fan_idx; +} __ec_align_size1; -#define EC_CMD_PWM_SET_DUTY 0x25 +#define EC_CMD_PWM_SET_DUTY 0x0025 /* 16 bit duty cycle, 0xffff = 100% */ #define EC_PWM_MAX_DUTY 0xffff @@ -1174,18 +1751,18 @@ struct ec_params_pwm_set_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ -} __packed; +} __ec_align4; 
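A minimal host-side sketch, not part of this patch, of how a caller might fill struct ec_params_pwm_set_duty for EC_CMD_PWM_SET_DUTY (0x0025): the duty field is scaled so that EC_PWM_MAX_DUTY (0xffff) corresponds to 100%, and the pwm_type argument is assumed to be a value from enum ec_pwm_type, which is not shown in this hunk.

#include <stdint.h>

/* Sketch only; assumes this header (cros_ec_commands.h) is already included. */
static void fill_pwm_set_duty(struct ec_params_pwm_set_duty *p,
			      unsigned int percent, uint8_t pwm_type,
			      uint8_t index)
{
	if (percent > 100)
		percent = 100;

	/* Scale 0-100% into the 16-bit duty range; EC_PWM_MAX_DUTY (0xffff) is 100%. */
	p->duty = (uint16_t)((percent * EC_PWM_MAX_DUTY) / 100);
	p->pwm_type = pwm_type;	/* a value from enum ec_pwm_type */
	p->index = index;	/* type-specific index, or 0 if unique */
}

The filled structure would then be sent as the parameter block of EC_CMD_PWM_SET_DUTY over whatever host interface is in use.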
-#define EC_CMD_PWM_GET_DUTY 0x26 +#define EC_CMD_PWM_GET_DUTY 0x0026 struct ec_params_pwm_get_duty { uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ -} __packed; +} __ec_align1; struct ec_response_pwm_get_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ -} __packed; +} __ec_align2; /*****************************************************************************/ /* @@ -1194,11 +1771,11 @@ struct ec_response_pwm_get_duty { * into a subcommand. We'll make separate structs for subcommands with * different input args, so that we know how much to expect. */ -#define EC_CMD_LIGHTBAR_CMD 0x28 +#define EC_CMD_LIGHTBAR_CMD 0x0028 struct rgb_s { uint8_t r, g, b; -}; +} __ec_todo_unpacked; #define LB_BATTERY_LEVELS 4 @@ -1238,7 +1815,7 @@ struct lightbar_params_v0 { /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ -} __packed; +} __ec_todo_packed; struct lightbar_params_v1 { /* Timing */ @@ -1251,7 +1828,10 @@ struct lightbar_params_v1 { int32_t s3_sleep_for; int32_t s3_ramp_up; int32_t s3_ramp_down; + int32_t s5_ramp_up; + int32_t s5_ramp_down; int32_t tap_tick_delay; + int32_t tap_gate_delay; int32_t tap_display_time; /* Tap-for-battery params */ @@ -1279,84 +1859,182 @@ struct lightbar_params_v1 { uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + /* s5: single color pulse on inhibited power-up */ + uint8_t s5_idx; + /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ -} __packed; +} __ec_todo_packed; + +/* Lightbar command params v2 + * crbug.com/467716 + * + * lightbar_parms_v1 was too big for i2c, therefore in v2, we split them up by + * logical groups to make it more manageable ( < 120 bytes). + * + * NOTE: Each of these groups must be less than 120 bytes. + */ + +struct lightbar_params_v2_timing { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + int32_t s5_ramp_up; + int32_t s5_ramp_down; + int32_t tap_tick_delay; + int32_t tap_gate_delay; + int32_t tap_display_time; +} __ec_todo_packed; + +struct lightbar_params_v2_tap { + /* Tap-for-battery params */ + uint8_t tap_pct_red; + uint8_t tap_pct_green; + uint8_t tap_seg_min_on; + uint8_t tap_seg_max_on; + uint8_t tap_seg_osc; + uint8_t tap_idx[3]; +} __ec_todo_packed; + +struct lightbar_params_v2_oscillation { + /* Oscillation */ + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ +} __ec_todo_packed; + +struct lightbar_params_v2_brightness { + /* Brightness limits based on the backlight and AC. */ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ +} __ec_todo_packed; + +struct lightbar_params_v2_thresholds { + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; +} __ec_todo_packed; -/* Lightbar program */ +struct lightbar_params_v2_colors { + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* s5: single color pulse on inhibited power-up */ + uint8_t s5_idx; + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __ec_todo_packed; + +/* Lightbar program. 
*/ #define EC_LB_PROG_LEN 192 struct lightbar_program { uint8_t size; uint8_t data[EC_LB_PROG_LEN]; -}; +} __ec_todo_unpacked; struct ec_params_lightbar { uint8_t cmd; /* Command (see enum lightbar_command) */ union { - struct { - /* no args */ - } dump, off, on, init, get_seq, get_params_v0, get_params_v1, - version, get_brightness, get_demo, suspend, resume; + /* + * The following commands have no args: + * + * dump, off, on, init, get_seq, get_params_v0, get_params_v1, + * version, get_brightness, get_demo, suspend, resume, + * get_params_v2_timing, get_params_v2_tap, get_params_v2_osc, + * get_params_v2_bright, get_params_v2_thlds, + * get_params_v2_colors + * + * Don't use an empty struct, because C++ hates that. + */ - struct { + struct __ec_todo_unpacked { uint8_t num; } set_brightness, seq, demo; - struct { + struct __ec_todo_unpacked { uint8_t ctrl, reg, value; } reg; - struct { + struct __ec_todo_unpacked { uint8_t led, red, green, blue; } set_rgb; - struct { + struct __ec_todo_unpacked { uint8_t led; } get_rgb; - struct { + struct __ec_todo_unpacked { uint8_t enable; } manual_suspend_ctrl; struct lightbar_params_v0 set_params_v0; struct lightbar_params_v1 set_params_v1; + + struct lightbar_params_v2_timing set_v2par_timing; + struct lightbar_params_v2_tap set_v2par_tap; + struct lightbar_params_v2_oscillation set_v2par_osc; + struct lightbar_params_v2_brightness set_v2par_bright; + struct lightbar_params_v2_thresholds set_v2par_thlds; + struct lightbar_params_v2_colors set_v2par_colors; + struct lightbar_program set_program; }; -} __packed; +} __ec_todo_packed; struct ec_response_lightbar { union { - struct { - struct { + struct __ec_todo_unpacked { + struct __ec_todo_unpacked { uint8_t reg; uint8_t ic0; uint8_t ic1; } vals[23]; } dump; - struct { + struct __ec_todo_unpacked { uint8_t num; } get_seq, get_brightness, get_demo; struct lightbar_params_v0 get_params_v0; struct lightbar_params_v1 get_params_v1; - struct { + + struct lightbar_params_v2_timing get_params_v2_timing; + struct lightbar_params_v2_tap get_params_v2_tap; + struct lightbar_params_v2_oscillation get_params_v2_osc; + struct lightbar_params_v2_brightness get_params_v2_bright; + struct lightbar_params_v2_thresholds get_params_v2_thlds; + struct lightbar_params_v2_colors get_params_v2_colors; + + struct __ec_todo_unpacked { uint32_t num; uint32_t flags; } version; - struct { + struct __ec_todo_unpacked { uint8_t red, green, blue; } get_rgb; - struct { - /* no return params */ - } off, on, init, set_brightness, seq, reg, set_rgb, - demo, set_params_v0, set_params_v1, - set_program, manual_suspend_ctrl, suspend, resume; + /* + * The following commands have no response: + * + * off, on, init, set_brightness, seq, reg, set_rgb, demo, + * set_params_v0, set_params_v1, set_program, + * manual_suspend_ctrl, suspend, resume, set_v2par_timing, + * set_v2par_tap, set_v2par_osc, set_v2par_bright, + * set_v2par_thlds, set_v2par_colors + */ }; -} __packed; +} __ec_todo_packed; /* Lightbar commands */ enum lightbar_command { @@ -1382,13 +2060,25 @@ enum lightbar_command { LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19, LIGHTBAR_CMD_SUSPEND = 20, LIGHTBAR_CMD_RESUME = 21, + LIGHTBAR_CMD_GET_PARAMS_V2_TIMING = 22, + LIGHTBAR_CMD_SET_PARAMS_V2_TIMING = 23, + LIGHTBAR_CMD_GET_PARAMS_V2_TAP = 24, + LIGHTBAR_CMD_SET_PARAMS_V2_TAP = 25, + LIGHTBAR_CMD_GET_PARAMS_V2_OSCILLATION = 26, + LIGHTBAR_CMD_SET_PARAMS_V2_OSCILLATION = 27, + LIGHTBAR_CMD_GET_PARAMS_V2_BRIGHTNESS = 28, + LIGHTBAR_CMD_SET_PARAMS_V2_BRIGHTNESS = 29, + 
LIGHTBAR_CMD_GET_PARAMS_V2_THRESHOLDS = 30, + LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31, + LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32, + LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33, LIGHTBAR_NUM_CMDS }; /*****************************************************************************/ /* LED control commands */ -#define EC_CMD_LED_CONTROL 0x29 +#define EC_CMD_LED_CONTROL 0x0029 enum ec_led_id { /* LED to indicate battery state of charge */ @@ -1400,13 +2090,21 @@ enum ec_led_id { EC_LED_ID_POWER_LED, /* LED on power adapter or its plug */ EC_LED_ID_ADAPTER_LED, + /* LED to indicate left side */ + EC_LED_ID_LEFT_LED, + /* LED to indicate right side */ + EC_LED_ID_RIGHT_LED, + /* LED to indicate recovery mode with HW_REINIT */ + EC_LED_ID_RECOVERY_HW_REINIT_LED, + /* LED to indicate sysrq debug mode. */ + EC_LED_ID_SYSRQ_DEBUG_LED, EC_LED_ID_COUNT }; /* LED control flags */ -#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */ -#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */ +#define EC_LED_FLAGS_QUERY BIT(0) /* Query LED capability only */ +#define EC_LED_FLAGS_AUTO BIT(1) /* Switch LED back to automatic control */ enum ec_led_colors { EC_LED_COLOR_RED = 0, @@ -1414,6 +2112,7 @@ enum ec_led_colors { EC_LED_COLOR_BLUE, EC_LED_COLOR_YELLOW, EC_LED_COLOR_WHITE, + EC_LED_COLOR_AMBER, EC_LED_COLOR_COUNT }; @@ -1423,7 +2122,7 @@ struct ec_params_led_control { uint8_t flags; /* Control flags */ uint8_t brightness[EC_LED_COLOR_COUNT]; -} __packed; +} __ec_align1; struct ec_response_led_control { /* @@ -1434,7 +2133,7 @@ struct ec_response_led_control { * Other values means the LED is control by PWM. */ uint8_t brightness_range[EC_LED_COLOR_COUNT]; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Verified boot commands */ @@ -1445,7 +2144,7 @@ struct ec_response_led_control { */ /* Verified boot hash command */ -#define EC_CMD_VBOOT_HASH 0x2A +#define EC_CMD_VBOOT_HASH 0x002A struct ec_params_vboot_hash { uint8_t cmd; /* enum ec_vboot_hash_cmd */ @@ -1455,7 +2154,7 @@ struct ec_params_vboot_hash { uint32_t offset; /* Offset in flash to hash */ uint32_t size; /* Number of bytes to hash */ uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */ -} __packed; +} __ec_align4; struct ec_response_vboot_hash { uint8_t status; /* enum ec_vboot_hash_status */ @@ -1465,7 +2164,7 @@ struct ec_response_vboot_hash { uint32_t offset; /* Offset in flash which was hashed */ uint32_t size; /* Number of bytes hashed */ uint8_t hash_digest[64]; /* Hash digest data */ -} __packed; +} __ec_align4; enum ec_vboot_hash_cmd { EC_VBOOT_HASH_GET = 0, /* Get current hash status */ @@ -1489,15 +2188,22 @@ enum ec_vboot_hash_status { * If one of these is specified, the EC will automatically update offset and * size to the correct values for the specified image (RO or RW). */ -#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe -#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd +#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe +#define EC_VBOOT_HASH_OFFSET_ACTIVE 0xfffffffd +#define EC_VBOOT_HASH_OFFSET_UPDATE 0xfffffffc + +/* + * 'RW' is vague if there are multiple RW images; we mean the active one, + * so the old constant is deprecated. + */ +#define EC_VBOOT_HASH_OFFSET_RW EC_VBOOT_HASH_OFFSET_ACTIVE /*****************************************************************************/ /* * Motion sense commands. We'll make separate structs for sub-commands with * different input args, so that we know how much to expect. 
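A minimal host-side sketch for the EC_CMD_LED_CONTROL interface defined above: query which colors a LED supports, then drive one of them manually. The ec_command() helper is a hypothetical transport, the command version is illustrative, and struct members not shown in this hunk (such as the LED selector) are simply left at zero.

/* hypothetical host->EC transport helper, not defined in this header */
extern int ec_command(unsigned int cmd, unsigned int version,
                      const void *out, unsigned int outsize,
                      void *in, unsigned int insize);

static int example_set_led_amber(void)
{
        struct ec_params_led_control p = { .flags = EC_LED_FLAGS_QUERY };
        struct ec_response_led_control r;
        int ret;

        /* with EC_LED_FLAGS_QUERY set, only capabilities are reported */
        ret = ec_command(EC_CMD_LED_CONTROL, 1, &p, sizeof(p), &r, sizeof(r));
        if (ret < 0)
                return ret;

        /* a brightness_range of zero means the color is not supported */
        if (!r.brightness_range[EC_LED_COLOR_AMBER])
                return -1;

        p.flags = 0;    /* manual control; EC_LED_FLAGS_AUTO hands it back */
        p.brightness[EC_LED_COLOR_AMBER] = r.brightness_range[EC_LED_COLOR_AMBER];
        return ec_command(EC_CMD_LED_CONTROL, 1, &p, sizeof(p), &r, sizeof(r));
}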
*/ -#define EC_CMD_MOTION_SENSE_CMD 0x2B +#define EC_CMD_MOTION_SENSE_CMD 0x002B /* Motion sense commands */ enum motionsense_command { @@ -1516,7 +2222,13 @@ enum motionsense_command { /* * EC Rate command is a setter/getter command for the EC sampling rate - * of all motion sensors in milliseconds. + * in milliseconds. + * It is per sensor, the EC run sample task at the minimum of all + * sensors EC_RATE. + * For sensors without hardware FIFO, EC_RATE should be equals to 1/ODR + * to collect all the sensor samples. + * For sensor with hardware FIFO, EC_RATE is used as the maximal delay + * to process of all motion sensors in milliseconds. */ MOTIONSENSE_CMD_EC_RATE = 2, @@ -1547,32 +2259,76 @@ enum motionsense_command { MOTIONSENSE_CMD_DATA = 6, /* - * Perform low level calibration.. On sensors that support it, ask to - * do offset calibration. + * Return sensor fifo info. + */ + MOTIONSENSE_CMD_FIFO_INFO = 7, + + /* + * Insert a flush element in the fifo and return sensor fifo info. + * The host can use that element to synchronize its operation. + */ + MOTIONSENSE_CMD_FIFO_FLUSH = 8, + + /* + * Return a portion of the fifo. + */ + MOTIONSENSE_CMD_FIFO_READ = 9, + + /* + * Perform low level calibration. + * On sensors that support it, ask to do offset calibration. */ MOTIONSENSE_CMD_PERFORM_CALIB = 10, /* - * Sensor Offset command is a setter/getter command for the offset used - * for calibration. The offsets can be calculated by the host, or via + * Sensor Offset command is a setter/getter command for the offset + * used for calibration. + * The offsets can be calculated by the host, or via * PERFORM_CALIB command. */ MOTIONSENSE_CMD_SENSOR_OFFSET = 11, - /* Number of motionsense sub-commands. */ - MOTIONSENSE_NUM_CMDS -}; + /* + * List available activities for a MOTION sensor. + * Indicates if they are enabled or disabled. + */ + MOTIONSENSE_CMD_LIST_ACTIVITIES = 12, + + /* + * Activity management + * Enable/Disable activity recognition. + */ + MOTIONSENSE_CMD_SET_ACTIVITY = 13, + + /* + * Lid Angle + */ + MOTIONSENSE_CMD_LID_ANGLE = 14, + + /* + * Allow the FIFO to trigger interrupt via MKBP events. + * By default the FIFO does not send interrupt to process the FIFO + * until the AP is ready or it is coming from a wakeup sensor. + */ + MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15, + + /* + * Spoof the readings of the sensors. The spoofed readings can be set + * to arbitrary values, or will lock to the last read actual values. + */ + MOTIONSENSE_CMD_SPOOF = 16, -enum motionsensor_id { - EC_MOTION_SENSOR_ACCEL_BASE = 0, - EC_MOTION_SENSOR_ACCEL_LID = 1, - EC_MOTION_SENSOR_GYRO = 2, + /* Set lid angle for tablet mode detection. */ + MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17, /* - * Note, if more sensors are added and this count changes, the padding - * in ec_response_motion_sense dump command must be modified. + * Sensor Scale command is a setter/getter command for the calibration + * scale. */ - EC_MOTION_SENSOR_COUNT = 3 + MOTIONSENSE_CMD_SENSOR_SCALE = 18, + + /* Number of motionsense sub-commands. */ + MOTIONSENSE_NUM_CMDS }; /* List of motion sensor types. */ @@ -1584,6 +2340,7 @@ enum motionsensor_type { MOTIONSENSE_TYPE_LIGHT = 4, MOTIONSENSE_TYPE_ACTIVITY = 5, MOTIONSENSE_TYPE_BARO = 6, + MOTIONSENSE_TYPE_SYNC = 7, MOTIONSENSE_TYPE_MAX, }; @@ -1591,19 +2348,116 @@ enum motionsensor_type { enum motionsensor_location { MOTIONSENSE_LOC_BASE = 0, MOTIONSENSE_LOC_LID = 1, + MOTIONSENSE_LOC_CAMERA = 2, MOTIONSENSE_LOC_MAX, }; /* List of motion sensor chips. 
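A short editorial helper for the MOTIONSENSE_CMD_EC_RATE comment above: a sensor without a hardware FIFO wants its EC collection interval set to one sample period, i.e. 1/ODR. This assumes (it is not stated in this hunk) that the ODR exchanged with the EC is expressed in millihertz.

/* convert an ODR in mHz into the matching EC_RATE in milliseconds (1/ODR) */
static unsigned int example_ec_rate_ms(unsigned int odr_mhz)
{
        if (!odr_mhz)
                return 0;               /* sensor disabled, nothing to collect */

        return 1000000 / odr_mhz;       /* e.g. 12500 mHz (12.5 Hz) -> 80 ms */
}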
*/ enum motionsensor_chip { MOTIONSENSE_CHIP_KXCJ9 = 0, + MOTIONSENSE_CHIP_LSM6DS0 = 1, + MOTIONSENSE_CHIP_BMI160 = 2, + MOTIONSENSE_CHIP_SI1141 = 3, + MOTIONSENSE_CHIP_SI1142 = 4, + MOTIONSENSE_CHIP_SI1143 = 5, + MOTIONSENSE_CHIP_KX022 = 6, + MOTIONSENSE_CHIP_L3GD20H = 7, + MOTIONSENSE_CHIP_BMA255 = 8, + MOTIONSENSE_CHIP_BMP280 = 9, + MOTIONSENSE_CHIP_OPT3001 = 10, + MOTIONSENSE_CHIP_BH1730 = 11, + MOTIONSENSE_CHIP_GPIO = 12, + MOTIONSENSE_CHIP_LIS2DH = 13, + MOTIONSENSE_CHIP_LSM6DSM = 14, + MOTIONSENSE_CHIP_LIS2DE = 15, + MOTIONSENSE_CHIP_LIS2MDL = 16, + MOTIONSENSE_CHIP_LSM6DS3 = 17, + MOTIONSENSE_CHIP_LSM6DSO = 18, + MOTIONSENSE_CHIP_LNG2DM = 19, + MOTIONSENSE_CHIP_MAX, +}; + +/* List of orientation positions */ +enum motionsensor_orientation { + MOTIONSENSE_ORIENTATION_LANDSCAPE = 0, + MOTIONSENSE_ORIENTATION_PORTRAIT = 1, + MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_PORTRAIT = 2, + MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_LANDSCAPE = 3, + MOTIONSENSE_ORIENTATION_UNKNOWN = 4, +}; + +struct ec_response_motion_sensor_data { + /* Flags for each sensor. */ + uint8_t flags; + /* Sensor number the data comes from. */ + uint8_t sensor_num; + /* Each sensor is up to 3-axis. */ + union { + int16_t data[3]; + struct __ec_todo_packed { + uint16_t reserved; + uint32_t timestamp; + }; + struct __ec_todo_unpacked { + uint8_t activity; /* motionsensor_activity */ + uint8_t state; + int16_t add_info[2]; + }; + }; +} __ec_todo_packed; + +/* Note: used in ec_response_get_next_data */ +struct ec_response_motion_sense_fifo_info { + /* Size of the fifo */ + uint16_t size; + /* Amount of space used in the fifo */ + uint16_t count; + /* Timestamp recorded in us. + * aka accurate timestamp when host event was triggered. + */ + uint32_t timestamp; + /* Total amount of vector lost */ + uint16_t total_lost; + /* Lost events since the last fifo_info, per sensors */ + uint16_t lost[0]; +} __ec_todo_packed; + +struct ec_response_motion_sense_fifo_data { + uint32_t number_data; + struct ec_response_motion_sensor_data data[0]; +} __ec_todo_packed; + +/* List supported activity recognition */ +enum motionsensor_activity { + MOTIONSENSE_ACTIVITY_RESERVED = 0, + MOTIONSENSE_ACTIVITY_SIG_MOTION = 1, + MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2, + MOTIONSENSE_ACTIVITY_ORIENTATION = 3, }; +struct ec_motion_sense_activity { + uint8_t sensor_num; + uint8_t activity; /* one of enum motionsensor_activity */ + uint8_t enable; /* 1: enable, 0: disable */ + uint8_t reserved; + uint16_t parameters[3]; /* activity dependent parameters */ +} __ec_todo_unpacked; + /* Module flag masks used for the dump sub-command. */ -#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0) +#define MOTIONSENSE_MODULE_FLAG_ACTIVE BIT(0) /* Sensor flag masks used for the dump sub-command. */ -#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0) +#define MOTIONSENSE_SENSOR_FLAG_PRESENT BIT(0) + +/* + * Flush entry for synchronization. + * data contains time stamp + */ +#define MOTIONSENSE_SENSOR_FLAG_FLUSH BIT(0) +#define MOTIONSENSE_SENSOR_FLAG_TIMESTAMP BIT(1) +#define MOTIONSENSE_SENSOR_FLAG_WAKEUP BIT(2) +#define MOTIONSENSE_SENSOR_FLAG_TABLET_MODE BIT(3) +#define MOTIONSENSE_SENSOR_FLAG_ODR BIT(4) /* * Send this value for the data element to only perform a read. 
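A sketch of walking a MOTIONSENSE_CMD_FIFO_READ payload using the structures and flag bits defined above; flush markers and timestamps reuse the same record layout and are told apart purely by the flags byte.

/* count how many FIFO records carry real axis samples */
static uint32_t example_count_samples(const struct ec_response_motion_sense_fifo_data *fifo)
{
        uint32_t i, samples = 0;

        for (i = 0; i < fifo->number_data; i++) {
                const struct ec_response_motion_sensor_data *s = &fifo->data[i];

                /* timestamp and flush records carry no axis data */
                if (s->flags & (MOTIONSENSE_SENSOR_FLAG_FLUSH |
                                MOTIONSENSE_SENSOR_FLAG_TIMESTAMP))
                        continue;

                samples++;
        }

        return samples;
}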
If you @@ -1614,48 +2468,79 @@ enum motionsensor_chip { #define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 +/* MOTIONSENSE_CMD_SENSOR_OFFSET subcommand flag */ /* Set Calibration information */ -#define MOTION_SENSE_SET_OFFSET 1 +#define MOTION_SENSE_SET_OFFSET BIT(0) -struct ec_response_motion_sensor_data { - /* Flags for each sensor. */ - uint8_t flags; - /* Sensor number the data comes from */ - uint8_t sensor_num; - /* Each sensor is up to 3-axis. */ - union { - int16_t data[3]; - struct { - uint16_t rsvd; - uint32_t timestamp; - } __packed; - struct { - uint8_t activity; /* motionsensor_activity */ - uint8_t state; - int16_t add_info[2]; - }; - }; -} __packed; +/* Default Scale value, factor 1. */ +#define MOTION_SENSE_DEFAULT_SCALE BIT(15) + +#define LID_ANGLE_UNRELIABLE 500 + +enum motionsense_spoof_mode { + /* Disable spoof mode. */ + MOTIONSENSE_SPOOF_MODE_DISABLE = 0, + + /* Enable spoof mode, but use provided component values. */ + MOTIONSENSE_SPOOF_MODE_CUSTOM, + + /* Enable spoof mode, but use the current sensor values. */ + MOTIONSENSE_SPOOF_MODE_LOCK_CURRENT, + + /* Query the current spoof mode status for the sensor. */ + MOTIONSENSE_SPOOF_MODE_QUERY, +}; struct ec_params_motion_sense { uint8_t cmd; union { /* Used for MOTIONSENSE_CMD_DUMP. */ - struct { - /* no args */ + struct __ec_todo_unpacked { + /* + * Maximal number of sensor the host is expecting. + * 0 means the host is only interested in the number + * of sensors controlled by the EC. + */ + uint8_t max_sensor_count; } dump; /* - * Used for MOTIONSENSE_CMD_EC_RATE and - * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + * Used for MOTIONSENSE_CMD_KB_WAKE_ANGLE. */ - struct { - /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + struct __ec_todo_unpacked { + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. + * kb_wake_angle: angle to wakup AP. + */ int16_t data; - } ec_rate, kb_wake_angle; + } kb_wake_angle; + + /* + * Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA + * and MOTIONSENSE_CMD_PERFORM_CALIB. + */ + struct __ec_todo_unpacked { + uint8_t sensor_num; + } info, info_3, data, fifo_flush, perform_calib, + list_activities; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR + * and MOTIONSENSE_CMD_SENSOR_RANGE. + */ + struct __ec_todo_unpacked { + uint8_t sensor_num; + + /* Rounding flag, true for round-up, false for down. */ + uint8_t roundup; + + uint16_t reserved; + + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int32_t data; + } ec_rate, sensor_odr, sensor_range; /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ - struct { + struct __ec_todo_packed { uint8_t sensor_num; /* @@ -1681,36 +2566,102 @@ struct ec_params_motion_sense { * Compass: 1/16 uT */ int16_t offset[3]; - } __packed sensor_offset; + } sensor_offset; - /* Used for MOTIONSENSE_CMD_INFO. */ - struct { + /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ + struct __ec_todo_packed { uint8_t sensor_num; - } info; - /* - * Used for MOTIONSENSE_CMD_SENSOR_ODR and - * MOTIONSENSE_CMD_SENSOR_RANGE. - */ - struct { - /* Should be element of enum motionsensor_id. */ - uint8_t sensor_num; + /* + * bit 0: If set (MOTION_SENSE_SET_OFFSET), set + * the calibration information in the EC. + * If unset, just retrieve calibration information. + */ + uint16_t flags; - /* Rounding flag, true for round-up, false for down. */ - uint8_t roundup; + /* + * Temperature at calibration, in units of 0.01 C + * 0x8000: invalid / unknown. 
+ * 0x0: 0C + * 0x7fff: +327.67C + */ + int16_t temp; - uint16_t reserved; + /* + * Scale for calibration: + * By default scale is 1, it is encoded on 16bits: + * 1 = BIT(15) + * ~2 = 0xFFFF + * ~0 = 0. + */ + uint16_t scale[3]; + } sensor_scale; - /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ - int32_t data; - } sensor_odr, sensor_range; + + /* Used for MOTIONSENSE_CMD_FIFO_INFO */ + /* (no params) */ + + /* Used for MOTIONSENSE_CMD_FIFO_READ */ + struct __ec_todo_unpacked { + /* + * Number of expected vector to return. + * EC may return less or 0 if none available. + */ + uint32_t max_data_vector; + } fifo_read; + + struct ec_motion_sense_activity set_activity; + + /* Used for MOTIONSENSE_CMD_LID_ANGLE */ + /* (no params) */ + + /* Used for MOTIONSENSE_CMD_FIFO_INT_ENABLE */ + struct __ec_todo_unpacked { + /* + * 1: enable, 0 disable fifo, + * EC_MOTION_SENSE_NO_VALUE return value. + */ + int8_t enable; + } fifo_int_enable; + + /* Used for MOTIONSENSE_CMD_SPOOF */ + struct __ec_todo_packed { + uint8_t sensor_id; + + /* See enum motionsense_spoof_mode. */ + uint8_t spoof_enable; + + /* Ignored, used for alignment. */ + uint8_t reserved; + + /* Individual component values to spoof. */ + int16_t components[3]; + } spoof; + + /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ + struct __ec_todo_unpacked { + /* + * Lid angle threshold for switching between tablet and + * clamshell mode. + */ + int16_t lid_angle; + + /* + * Hysteresis degree to prevent fluctuations between + * clamshell and tablet mode if lid angle keeps + * changing around the threshold. Lid motion driver will + * use lid_angle + hys_degree to trigger tablet mode and + * lid_angle - hys_degree to trigger clamshell mode. + */ + int16_t hys_degree; + } tablet_mode_threshold; }; -} __packed; +} __ec_todo_packed; struct ec_response_motion_sense { union { - /* Used for MOTIONSENSE_CMD_DUMP. */ - struct { + /* Used for MOTIONSENSE_CMD_DUMP */ + struct __ec_todo_unpacked { /* Flags representing the motion sensor module. */ uint8_t module_flags; @@ -1725,7 +2676,7 @@ struct ec_response_motion_sense { } dump; /* Used for MOTIONSENSE_CMD_INFO. */ - struct { + struct __ec_todo_unpacked { /* Should be element of enum motionsensor_type. */ uint8_t type; @@ -1736,37 +2687,129 @@ struct ec_response_motion_sense { uint8_t chip; } info; + /* Used for MOTIONSENSE_CMD_INFO version 3 */ + struct __ec_todo_unpacked { + /* Should be element of enum motionsensor_type. */ + uint8_t type; + + /* Should be element of enum motionsensor_location. */ + uint8_t location; + + /* Should be element of enum motionsensor_chip. */ + uint8_t chip; + + /* Minimum sensor sampling frequency */ + uint32_t min_frequency; + + /* Maximum sensor sampling frequency */ + uint32_t max_frequency; + + /* Max number of sensor events that could be in fifo */ + uint32_t fifo_max_event_count; + } info_3; + /* Used for MOTIONSENSE_CMD_DATA */ struct ec_response_motion_sensor_data data; /* * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, - * MOTIONSENSE_CMD_SENSOR_RANGE, and - * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + * MOTIONSENSE_CMD_SENSOR_RANGE, + * MOTIONSENSE_CMD_KB_WAKE_ANGLE, + * MOTIONSENSE_CMD_FIFO_INT_ENABLE and + * MOTIONSENSE_CMD_SPOOF. */ - struct { + struct __ec_todo_unpacked { /* Current value of the parameter queried. 
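A host-side sketch of the MOTIONSENSE_CMD_SENSOR_ODR round trip described above: pass EC_MOTION_SENSE_NO_VALUE to read the current rate, or a rate plus the roundup flag to change it. The ec_command() transport helper and the command version are placeholders.

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_set_sensor_odr(uint8_t sensor_num, int32_t odr)
{
        struct ec_params_motion_sense p = { .cmd = MOTIONSENSE_CMD_SENSOR_ODR };
        struct ec_response_motion_sense r;
        int ret;

        p.sensor_odr.sensor_num = sensor_num;
        p.sensor_odr.roundup = 1;       /* round the requested rate up, not down */
        p.sensor_odr.data = odr;        /* or EC_MOTION_SENSE_NO_VALUE to read */

        ret = ec_command(EC_CMD_MOTION_SENSE_CMD, 1, &p, sizeof(p), &r, sizeof(r));
        if (ret < 0)
                return ret;

        return r.sensor_odr.ret;        /* rate actually programmed by the EC */
}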
*/ int32_t ret; - } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + } ec_rate, sensor_odr, sensor_range, kb_wake_angle, + fifo_int_enable, spoof; - /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ - struct { + /* + * Used for MOTIONSENSE_CMD_SENSOR_OFFSET, + * PERFORM_CALIB. + */ + struct __ec_todo_unpacked { int16_t temp; int16_t offset[3]; } sensor_offset, perform_calib; + + /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ + struct __ec_todo_unpacked { + int16_t temp; + uint16_t scale[3]; + } sensor_scale; + + struct ec_response_motion_sense_fifo_info fifo_info, fifo_flush; + + struct ec_response_motion_sense_fifo_data fifo_read; + + struct __ec_todo_packed { + uint16_t reserved; + uint32_t enabled; + uint32_t disabled; + } list_activities; + + /* No params for set activity */ + + /* Used for MOTIONSENSE_CMD_LID_ANGLE */ + struct __ec_todo_unpacked { + /* + * Angle between 0 and 360 degree if available, + * LID_ANGLE_UNRELIABLE otherwise. + */ + uint16_t value; + } lid_angle; + + /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ + struct __ec_todo_unpacked { + /* + * Lid angle threshold for switching between tablet and + * clamshell mode. + */ + uint16_t lid_angle; + + /* Hysteresis degree. */ + uint16_t hys_degree; + } tablet_mode_threshold; + }; -} __packed; +} __ec_todo_packed; + +/*****************************************************************************/ +/* Force lid open command */ + +/* Make lid event always open */ +#define EC_CMD_FORCE_LID_OPEN 0x002C + +struct ec_params_force_lid_open { + uint8_t enabled; +} __ec_align1; + +/*****************************************************************************/ +/* Configure the behavior of the power button */ +#define EC_CMD_CONFIG_POWER_BUTTON 0x002D + +enum ec_config_power_button_flags { + /* Enable/Disable power button pulses for x86 devices */ + EC_POWER_BUTTON_ENABLE_PULSE = BIT(0), +}; + +struct ec_params_config_power_button { + /* See enum ec_config_power_button_flags */ + uint8_t flags; +} __ec_align1; /*****************************************************************************/ /* USB charging control commands */ /* Set USB port charging mode */ -#define EC_CMD_USB_CHARGE_SET_MODE 0x30 +#define EC_CMD_USB_CHARGE_SET_MODE 0x0030 struct ec_params_usb_charge_set_mode { uint8_t usb_port_id; - uint8_t mode; -} __packed; + uint8_t mode:7; + uint8_t inhibit_charge:1; +} __ec_align1; /*****************************************************************************/ /* Persistent storage for host */ @@ -1775,35 +2818,35 @@ struct ec_params_usb_charge_set_mode { #define EC_PSTORE_SIZE_MAX 64 /* Get persistent storage info */ -#define EC_CMD_PSTORE_INFO 0x40 +#define EC_CMD_PSTORE_INFO 0x0040 struct ec_response_pstore_info { /* Persistent storage size, in bytes */ uint32_t pstore_size; /* Access size; read/write offset and size must be a multiple of this */ uint32_t access_size; -} __packed; +} __ec_align4; /* * Read persistent storage * * Response is params.size bytes of data. 
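A sketch of reading the whole persistent store with the EC_CMD_PSTORE_READ command defined just below, honoring the rule above that offsets and sizes are multiples of access_size; the ec_command() transport helper is hypothetical.

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_dump_pstore(uint8_t *buf, uint32_t buflen)
{
        struct ec_response_pstore_info info;
        struct ec_params_pstore_read p;
        uint32_t off;
        int ret;

        ret = ec_command(EC_CMD_PSTORE_INFO, 0, NULL, 0, &info, sizeof(info));
        if (ret < 0)
                return ret;
        if (buflen < info.pstore_size)
                return -1;

        /* both offset and size must be multiples of access_size */
        for (off = 0; off < info.pstore_size; off += info.access_size) {
                p.offset = off;
                p.size = info.access_size;
                ret = ec_command(EC_CMD_PSTORE_READ, 0, &p, sizeof(p),
                                 buf + off, info.access_size);
                if (ret < 0)
                        return ret;
        }

        return 0;
}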
*/ -#define EC_CMD_PSTORE_READ 0x41 +#define EC_CMD_PSTORE_READ 0x0041 struct ec_params_pstore_read { uint32_t offset; /* Byte offset to read */ uint32_t size; /* Size to read in bytes */ -} __packed; +} __ec_align4; /* Write persistent storage */ -#define EC_CMD_PSTORE_WRITE 0x42 +#define EC_CMD_PSTORE_WRITE 0x0042 struct ec_params_pstore_write { uint32_t offset; /* Byte offset to write */ uint32_t size; /* Size to write in bytes */ uint8_t data[EC_PSTORE_SIZE_MAX]; -} __packed; +} __ec_align4; /*****************************************************************************/ /* Real-time clock */ @@ -1811,21 +2854,21 @@ struct ec_params_pstore_write { /* RTC params and response structures */ struct ec_params_rtc { uint32_t time; -} __packed; +} __ec_align4; struct ec_response_rtc { uint32_t time; -} __packed; +} __ec_align4; /* These use ec_response_rtc */ -#define EC_CMD_RTC_GET_VALUE 0x44 -#define EC_CMD_RTC_GET_ALARM 0x45 +#define EC_CMD_RTC_GET_VALUE 0x0044 +#define EC_CMD_RTC_GET_ALARM 0x0045 /* These all use ec_params_rtc */ -#define EC_CMD_RTC_SET_VALUE 0x46 -#define EC_CMD_RTC_SET_ALARM 0x47 +#define EC_CMD_RTC_SET_VALUE 0x0046 +#define EC_CMD_RTC_SET_ALARM 0x0047 -/* Pass as param to SET_ALARM to clear the current alarm */ +/* Pass as time param to SET_ALARM to clear the current alarm */ #define EC_RTC_ALARM_CLEAR 0 /*****************************************************************************/ @@ -1835,8 +2878,8 @@ struct ec_response_rtc { #define EC_PORT80_SIZE_MAX 32 /* Get last port80 code from previous boot */ -#define EC_CMD_PORT80_LAST_BOOT 0x48 -#define EC_CMD_PORT80_READ 0x48 +#define EC_CMD_PORT80_LAST_BOOT 0x0048 +#define EC_CMD_PORT80_READ 0x0048 enum ec_port80_subcmd { EC_PORT80_GET_INFO = 0, @@ -1846,29 +2889,72 @@ enum ec_port80_subcmd { struct ec_params_port80_read { uint16_t subcmd; union { - struct { + struct __ec_todo_unpacked { uint32_t offset; uint32_t num_entries; } read_buffer; }; -} __packed; +} __ec_todo_packed; struct ec_response_port80_read { union { - struct { + struct __ec_todo_unpacked { uint32_t writes; uint32_t history_size; uint32_t last_boot; } get_info; - struct { + struct __ec_todo_unpacked { uint16_t codes[EC_PORT80_SIZE_MAX]; } data; }; -} __packed; +} __ec_todo_packed; struct ec_response_port80_last_boot { uint16_t code; -} __packed; +} __ec_align2; + +/*****************************************************************************/ +/* Temporary secure storage for host verified boot use */ + +/* Number of bytes in a vstore slot */ +#define EC_VSTORE_SLOT_SIZE 64 + +/* Maximum number of vstore slots */ +#define EC_VSTORE_SLOT_MAX 32 + +/* Get persistent storage info */ +#define EC_CMD_VSTORE_INFO 0x0049 +struct ec_response_vstore_info { + /* Indicates which slots are locked */ + uint32_t slot_locked; + /* Total number of slots available */ + uint8_t slot_count; +} __ec_align_size1; + +/* + * Read temporary secure storage + * + * Response is EC_VSTORE_SLOT_SIZE bytes of data. + */ +#define EC_CMD_VSTORE_READ 0x004A + +struct ec_params_vstore_read { + uint8_t slot; /* Slot to read from */ +} __ec_align1; + +struct ec_response_vstore_read { + uint8_t data[EC_VSTORE_SLOT_SIZE]; +} __ec_align1; + +/* + * Write temporary secure storage and lock it. + */ +#define EC_CMD_VSTORE_WRITE 0x004B + +struct ec_params_vstore_write { + uint8_t slot; /* Slot to write to */ + uint8_t data[EC_VSTORE_SLOT_SIZE]; +} __ec_align1; /*****************************************************************************/ /* Thermal engine commands. 
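A sketch of the vstore interface defined above: check slot_locked before writing, since EC_CMD_VSTORE_WRITE locks the slot it writes. The ec_command() transport helper is hypothetical.

#include <string.h>

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_vstore_write(uint8_t slot, const uint8_t *data)
{
        struct ec_response_vstore_info info;
        struct ec_params_vstore_write w;
        int ret;

        ret = ec_command(EC_CMD_VSTORE_INFO, 0, NULL, 0, &info, sizeof(info));
        if (ret < 0)
                return ret;

        /* slot_locked is a bitmap; skip slots already locked by an earlier write */
        if (slot >= info.slot_count || (info.slot_locked & (1u << slot)))
                return -1;

        w.slot = slot;
        memcpy(w.data, data, EC_VSTORE_SLOT_SIZE);
        return ec_command(EC_CMD_VSTORE_WRITE, 0, &w, sizeof(w), NULL, 0);
}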
Note that there are two implementations. We'll @@ -1877,8 +2963,8 @@ struct ec_response_port80_last_boot { * Version 1 separates the CPU thermal limits from the fan control. */ -#define EC_CMD_THERMAL_SET_THRESHOLD 0x50 -#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 +#define EC_CMD_THERMAL_SET_THRESHOLD 0x0050 +#define EC_CMD_THERMAL_GET_THRESHOLD 0x0051 /* The version 0 structs are opaque. You have to know what they are for * the get/set commands to make any sense. @@ -1889,17 +2975,17 @@ struct ec_params_thermal_set_threshold { uint8_t sensor_type; uint8_t threshold_id; uint16_t value; -} __packed; +} __ec_align2; /* Version 0 - get */ struct ec_params_thermal_get_threshold { uint8_t sensor_type; uint8_t threshold_id; -} __packed; +} __ec_align1; struct ec_response_thermal_get_threshold { uint16_t value; -} __packed; +} __ec_align2; /* The version 1 structs are visible. */ @@ -1911,71 +2997,124 @@ enum ec_temp_thresholds { EC_TEMP_THRESH_COUNT }; -/* Thermal configuration for one temperature sensor. Temps are in degrees K. +/* + * Thermal configuration for one temperature sensor. Temps are in degrees K. * Zero values will be silently ignored by the thermal task. + * + * Set 'temp_host' value allows thermal task to trigger some event with 1 degree + * hysteresis. + * For example, + * temp_host[EC_TEMP_THRESH_HIGH] = 300 K + * temp_host_release[EC_TEMP_THRESH_HIGH] = 0 K + * EC will throttle ap when temperature >= 301 K, and release throttling when + * temperature <= 299 K. + * + * Set 'temp_host_release' value allows thermal task has a custom hysteresis. + * For example, + * temp_host[EC_TEMP_THRESH_HIGH] = 300 K + * temp_host_release[EC_TEMP_THRESH_HIGH] = 295 K + * EC will throttle ap when temperature >= 301 K, and release throttling when + * temperature <= 294 K. + * + * Note that this structure is a sub-structure of + * ec_params_thermal_set_threshold_v1, but maintains its alignment there. */ struct ec_thermal_config { uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ + uint32_t temp_host_release[EC_TEMP_THRESH_COUNT]; /* release levels */ uint32_t temp_fan_off; /* no active cooling needed */ uint32_t temp_fan_max; /* max active cooling needed */ -} __packed; +} __ec_align4; /* Version 1 - get config for one sensor. */ struct ec_params_thermal_get_threshold_v1 { uint32_t sensor_num; -} __packed; +} __ec_align4; /* This returns a struct ec_thermal_config */ -/* Version 1 - set config for one sensor. - * Use read-modify-write for best results! */ +/* + * Version 1 - set config for one sensor. + * Use read-modify-write for best results! + */ struct ec_params_thermal_set_threshold_v1 { uint32_t sensor_num; struct ec_thermal_config cfg; -} __packed; +} __ec_align4; /* This returns no data */ /****************************************************************************/ /* Toggle automatic fan control */ -#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52 +#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052 + +/* Version 1 of input params */ +struct ec_params_auto_fan_ctrl_v1 { + uint8_t fan_idx; +} __ec_align1; -/* Get TMP006 calibration data */ -#define EC_CMD_TMP006_GET_CALIBRATION 0x53 +/* Get/Set TMP006 calibration data */ +#define EC_CMD_TMP006_GET_CALIBRATION 0x0053 +#define EC_CMD_TMP006_SET_CALIBRATION 0x0054 +/* + * The original TMP006 calibration only needed four params, but now we need + * more. Since the algorithm is nothing but magic numbers anyway, we'll leave + * the params opaque. The v1 "get" response will include the algorithm number + * and how many params it requires. 
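A sketch of the read-modify-write flow recommended above for the version 1 thermal threshold commands: fetch the sensor's current ec_thermal_config, adjust one field, and write the whole structure back. The ec_command() transport helper is hypothetical.

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_set_high_threshold(uint32_t sensor_num, uint32_t temp_k)
{
        struct ec_params_thermal_get_threshold_v1 g = { .sensor_num = sensor_num };
        struct ec_params_thermal_set_threshold_v1 s = { .sensor_num = sensor_num };
        int ret;

        /* the GET response is the sensor's current struct ec_thermal_config */
        ret = ec_command(EC_CMD_THERMAL_GET_THRESHOLD, 1, &g, sizeof(g),
                         &s.cfg, sizeof(s.cfg));
        if (ret < 0)
                return ret;

        s.cfg.temp_host[EC_TEMP_THRESH_HIGH] = temp_k;  /* degrees K, 0 = ignore */
        return ec_command(EC_CMD_THERMAL_SET_THRESHOLD, 1, &s, sizeof(s), NULL, 0);
}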
That way we can change the EC code without + * needing to update this file. We can also use a different algorithm on each + * sensor. + */ + +/* This is the same struct for both v0 and v1. */ struct ec_params_tmp006_get_calibration { uint8_t index; -} __packed; +} __ec_align1; -struct ec_response_tmp006_get_calibration { +/* Version 0 */ +struct ec_response_tmp006_get_calibration_v0 { float s0; float b0; float b1; float b2; -} __packed; +} __ec_align4; -/* Set TMP006 calibration data */ -#define EC_CMD_TMP006_SET_CALIBRATION 0x54 - -struct ec_params_tmp006_set_calibration { +struct ec_params_tmp006_set_calibration_v0 { uint8_t index; - uint8_t reserved[3]; /* Reserved; set 0 */ + uint8_t reserved[3]; float s0; float b0; float b1; float b2; -} __packed; +} __ec_align4; + +/* Version 1 */ +struct ec_response_tmp006_get_calibration_v1 { + uint8_t algorithm; + uint8_t num_params; + uint8_t reserved[2]; + float val[0]; +} __ec_align4; + +struct ec_params_tmp006_set_calibration_v1 { + uint8_t index; + uint8_t algorithm; + uint8_t num_params; + uint8_t reserved; + float val[0]; +} __ec_align4; + /* Read raw TMP006 data */ -#define EC_CMD_TMP006_GET_RAW 0x55 +#define EC_CMD_TMP006_GET_RAW 0x0055 struct ec_params_tmp006_get_raw { uint8_t index; -} __packed; +} __ec_align1; struct ec_response_tmp006_get_raw { int32_t t; /* In 1/100 K */ int32_t v; /* In nV */ -}; +} __ec_align4; /*****************************************************************************/ /* MKBP - Matrix KeyBoard Protocol */ @@ -1990,24 +3129,24 @@ struct ec_response_tmp006_get_raw { * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX. */ -#define EC_CMD_MKBP_STATE 0x60 +#define EC_CMD_MKBP_STATE 0x0060 /* * Provide information about various MKBP things. See enum ec_mkbp_info_type. */ -#define EC_CMD_MKBP_INFO 0x61 +#define EC_CMD_MKBP_INFO 0x0061 struct ec_response_mkbp_info { uint32_t rows; uint32_t cols; /* Formerly "switches", which was 0. 
*/ uint8_t reserved; -} __packed; +} __ec_align_size1; struct ec_params_mkbp_info { uint8_t info_type; uint8_t event_type; -} __packed; +} __ec_align1; enum ec_mkbp_info_type { /* @@ -2049,17 +3188,28 @@ enum ec_mkbp_info_type { }; /* Simulate key press */ -#define EC_CMD_MKBP_SIMULATE_KEY 0x62 +#define EC_CMD_MKBP_SIMULATE_KEY 0x0062 struct ec_params_mkbp_simulate_key { uint8_t col; uint8_t row; uint8_t pressed; -} __packed; +} __ec_align1; + +#define EC_CMD_GET_KEYBOARD_ID 0x0063 + +struct ec_response_keyboard_id { + uint32_t keyboard_id; +} __ec_align4; + +enum keyboard_id { + KEYBOARD_ID_UNSUPPORTED = 0, + KEYBOARD_ID_UNREADABLE = 0xffffffff, +}; /* Configure keyboard scanning */ -#define EC_CMD_MKBP_SET_CONFIG 0x64 -#define EC_CMD_MKBP_GET_CONFIG 0x65 +#define EC_CMD_MKBP_SET_CONFIG 0x0064 +#define EC_CMD_MKBP_GET_CONFIG 0x0065 /* flags */ enum mkbp_config_flags { @@ -2067,16 +3217,21 @@ enum mkbp_config_flags { }; enum mkbp_config_valid { - EC_MKBP_VALID_SCAN_PERIOD = 1 << 0, - EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1, - EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3, - EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4, - EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5, - EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6, - EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7, + EC_MKBP_VALID_SCAN_PERIOD = BIT(0), + EC_MKBP_VALID_POLL_TIMEOUT = BIT(1), + EC_MKBP_VALID_MIN_POST_SCAN_DELAY = BIT(3), + EC_MKBP_VALID_OUTPUT_SETTLE = BIT(4), + EC_MKBP_VALID_DEBOUNCE_DOWN = BIT(5), + EC_MKBP_VALID_DEBOUNCE_UP = BIT(6), + EC_MKBP_VALID_FIFO_MAX_DEPTH = BIT(7), }; -/* Configuration for our key scanning algorithm */ +/* + * Configuration for our key scanning algorithm. + * + * Note that this is used as a sub-structure of + * ec_{params/response}_mkbp_get_config. + */ struct ec_mkbp_config { uint32_t valid_mask; /* valid fields */ uint8_t flags; /* some flags (enum mkbp_config_flags) */ @@ -2096,18 +3251,18 @@ struct ec_mkbp_config { uint16_t debounce_up_us; /* time for debounce on key up */ /* maximum depth to allow for fifo (0 = no keyscan output) */ uint8_t fifo_max_depth; -} __packed; +} __ec_align_size1; struct ec_params_mkbp_set_config { struct ec_mkbp_config config; -} __packed; +} __ec_align_size1; struct ec_response_mkbp_get_config { struct ec_mkbp_config config; -} __packed; +} __ec_align_size1; /* Run the key scan emulation */ -#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66 +#define EC_CMD_KEYSCAN_SEQ_CTRL 0x0066 enum ec_keyscan_seq_cmd { EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */ @@ -2122,23 +3277,23 @@ enum ec_collect_flags { * Indicates this scan was processed by the EC. Due to timing, some * scans may be skipped. */ - EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0, + EC_KEYSCAN_SEQ_FLAG_DONE = BIT(0), }; struct ec_collect_item { uint8_t flags; /* some flags (enum ec_collect_flags) */ -}; +} __ec_align1; struct ec_params_keyscan_seq_ctrl { uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */ union { - struct { + struct __ec_align1 { uint8_t active; /* still active */ uint8_t num_items; /* number of items */ /* Current item being presented */ uint8_t cur_item; } status; - struct { + struct __ec_todo_unpacked { /* * Absolute time for this scan, measured from the * start of the sequence. 
@@ -2146,29 +3301,40 @@ struct ec_params_keyscan_seq_ctrl { uint32_t time_us; uint8_t scan[0]; /* keyscan data */ } add; - struct { + struct __ec_align1 { uint8_t start_item; /* First item to return */ uint8_t num_items; /* Number of items to return */ } collect; }; -} __packed; +} __ec_todo_packed; struct ec_result_keyscan_seq_ctrl { union { - struct { + struct __ec_todo_unpacked { uint8_t num_items; /* Number of items */ /* Data for each item */ struct ec_collect_item item[0]; } collect; }; -} __packed; +} __ec_todo_packed; /* - * Command for retrieving the next pending MKBP event from the EC device + * Get the next pending MKBP event. * - * The device replies with UNAVAILABLE if there aren't any pending events. + * Returns EC_RES_UNAVAILABLE if there is no event pending. */ -#define EC_CMD_GET_NEXT_EVENT 0x67 +#define EC_CMD_GET_NEXT_EVENT 0x0067 + +#define EC_MKBP_HAS_MORE_EVENTS_SHIFT 7 + +/* + * We use the most significant bit of the event type to indicate to the host + * that the EC has more MKBP events available to provide. + */ +#define EC_MKBP_HAS_MORE_EVENTS BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) + +/* The mask to apply to get the raw event type */ +#define EC_MKBP_EVENT_TYPE_MASK (BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) - 1) enum ec_mkbp_event { /* Keyboard matrix changed. The event data is the new matrix state. */ @@ -2186,9 +3352,21 @@ enum ec_mkbp_event { /* The state of the switches have changed. */ EC_MKBP_EVENT_SWITCH = 4, - /* EC sent a sysrq command */ + /* New Fingerprint sensor event, the event data is fp_events bitmap. */ + EC_MKBP_EVENT_FINGERPRINT = 5, + + /* + * Sysrq event: send emulated sysrq. The event data is sysrq, + * corresponding to the key to be pressed. + */ EC_MKBP_EVENT_SYSRQ = 6, + /* + * New 64-bit host event. + * The event data is 8 bytes of host event flags. 
+ */ + EC_MKBP_EVENT_HOST_EVENT64 = 7, + /* Notify the AP that something happened on CEC */ EC_MKBP_EVENT_CEC_EVENT = 8, @@ -2198,65 +3376,140 @@ enum ec_mkbp_event { /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; +BUILD_ASSERT(EC_MKBP_EVENT_COUNT <= EC_MKBP_EVENT_TYPE_MASK); -union ec_response_get_next_data { - uint8_t key_matrix[13]; +union __ec_align_offset1 ec_response_get_next_data { + uint8_t key_matrix[13]; /* Unaligned */ - uint32_t host_event; + uint32_t host_event; + uint64_t host_event64; + + struct __ec_todo_unpacked { + /* For aligning the fifo_info */ + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } sensor_fifo; + + uint32_t buttons; + + uint32_t switches; - uint32_t buttons; - uint32_t switches; - uint32_t sysrq; -} __packed; + uint32_t fp_events; + + uint32_t sysrq; -union ec_response_get_next_data_v1 { + /* CEC events from enum mkbp_cec_event */ + uint32_t cec_events; +}; + +union __ec_align_offset1 ec_response_get_next_data_v1 { uint8_t key_matrix[16]; + + /* Unaligned */ uint32_t host_event; + uint64_t host_event64; + + struct __ec_todo_unpacked { + /* For aligning the fifo_info */ + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } sensor_fifo; + uint32_t buttons; + uint32_t switches; + + uint32_t fp_events; + uint32_t sysrq; + + /* CEC events from enum mkbp_cec_event */ uint32_t cec_events; + uint8_t cec_message[16]; -} __packed; +}; +BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16); struct ec_response_get_next_event { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data data; -} __packed; +} __ec_align1; struct ec_response_get_next_event_v1 { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data_v1 data; -} __packed; +} __ec_align1; /* Bit indices for buttons and switches.*/ /* Buttons */ #define EC_MKBP_POWER_BUTTON 0 #define EC_MKBP_VOL_UP 1 #define EC_MKBP_VOL_DOWN 2 +#define EC_MKBP_RECOVERY 3 /* Switches */ #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 #define EC_MKBP_BASE_ATTACHED 2 +/* Run keyboard factory test scanning */ +#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068 + +struct ec_response_keyboard_factory_test { + uint16_t shorted; /* Keyboard pins are shorted */ +} __ec_align2; + +/* Fingerprint events in 'fp_events' for EC_MKBP_EVENT_FINGERPRINT */ +#define EC_MKBP_FP_RAW_EVENT(fp_events) ((fp_events) & 0x00FFFFFF) +#define EC_MKBP_FP_ERRCODE(fp_events) ((fp_events) & 0x0000000F) +#define EC_MKBP_FP_ENROLL_PROGRESS_OFFSET 4 +#define EC_MKBP_FP_ENROLL_PROGRESS(fpe) (((fpe) & 0x00000FF0) \ + >> EC_MKBP_FP_ENROLL_PROGRESS_OFFSET) +#define EC_MKBP_FP_MATCH_IDX_OFFSET 12 +#define EC_MKBP_FP_MATCH_IDX_MASK 0x0000F000 +#define EC_MKBP_FP_MATCH_IDX(fpe) (((fpe) & EC_MKBP_FP_MATCH_IDX_MASK) \ + >> EC_MKBP_FP_MATCH_IDX_OFFSET) +#define EC_MKBP_FP_ENROLL BIT(27) +#define EC_MKBP_FP_MATCH BIT(28) +#define EC_MKBP_FP_FINGER_DOWN BIT(29) +#define EC_MKBP_FP_FINGER_UP BIT(30) +#define EC_MKBP_FP_IMAGE_READY BIT(31) +/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_ENROLL is set */ +#define EC_MKBP_FP_ERR_ENROLL_OK 0 +#define EC_MKBP_FP_ERR_ENROLL_LOW_QUALITY 1 +#define EC_MKBP_FP_ERR_ENROLL_IMMOBILE 2 +#define EC_MKBP_FP_ERR_ENROLL_LOW_COVERAGE 3 +#define EC_MKBP_FP_ERR_ENROLL_INTERNAL 5 +/* Can be used to detect if image was usable for enrollment or not. 
*/ +#define EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK 1 +/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_MATCH is set */ +#define EC_MKBP_FP_ERR_MATCH_NO 0 +#define EC_MKBP_FP_ERR_MATCH_NO_INTERNAL 6 +#define EC_MKBP_FP_ERR_MATCH_NO_TEMPLATES 7 +#define EC_MKBP_FP_ERR_MATCH_NO_LOW_QUALITY 2 +#define EC_MKBP_FP_ERR_MATCH_NO_LOW_COVERAGE 4 +#define EC_MKBP_FP_ERR_MATCH_YES 1 +#define EC_MKBP_FP_ERR_MATCH_YES_UPDATED 3 +#define EC_MKBP_FP_ERR_MATCH_YES_UPDATE_FAILED 5 + + /*****************************************************************************/ /* Temperature sensor commands */ /* Read temperature sensor info */ -#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70 +#define EC_CMD_TEMP_SENSOR_GET_INFO 0x0070 struct ec_params_temp_sensor_get_info { uint8_t id; -} __packed; +} __ec_align1; struct ec_response_temp_sensor_get_info { char sensor_name[32]; uint8_t sensor_type; -} __packed; +} __ec_align1; /*****************************************************************************/ @@ -2269,49 +3522,131 @@ struct ec_response_temp_sensor_get_info { /*****************************************************************************/ /* Host event commands */ + +/* Obsolete. New implementation should use EC_CMD_HOST_EVENT instead */ /* * Host event mask params and response structures, shared by all of the host * event commands below. */ struct ec_params_host_event_mask { uint32_t mask; -} __packed; +} __ec_align4; struct ec_response_host_event_mask { uint32_t mask; -} __packed; +} __ec_align4; /* These all use ec_response_host_event_mask */ -#define EC_CMD_HOST_EVENT_GET_B 0x87 -#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88 -#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89 -#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d +#define EC_CMD_HOST_EVENT_GET_B 0x0087 +#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x0088 +#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x0089 +#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x008D /* These all use ec_params_host_event_mask */ -#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a -#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b -#define EC_CMD_HOST_EVENT_CLEAR 0x8c -#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e -#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f +#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x008A +#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x008B +#define EC_CMD_HOST_EVENT_CLEAR 0x008C +#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x008E +#define EC_CMD_HOST_EVENT_CLEAR_B 0x008F + +/* + * Unified host event programming interface - Should be used by newer versions + * of BIOS/OS to program host events and masks + */ + +struct ec_params_host_event { + + /* Action requested by host - one of enum ec_host_event_action. */ + uint8_t action; + + /* + * Mask type that the host requested the action on - one of + * enum ec_host_event_mask_type. + */ + uint8_t mask_type; + + /* Set to 0, ignore on read */ + uint16_t reserved; + + /* Value to be used in case of set operations. */ + uint64_t value; +} __ec_align4; + +/* + * Response structure returned by EC_CMD_HOST_EVENT. + * Update the value on a GET request. Set to 0 on GET/CLEAR + */ + +struct ec_response_host_event { + + /* Mask value in case of get operation */ + uint64_t value; +} __ec_align4; + +enum ec_host_event_action { + /* + * params.value is ignored. 
Value of mask_type populated + * in response.value + */ + EC_HOST_EVENT_GET, + + /* Bits in params.value are set */ + EC_HOST_EVENT_SET, + + /* Bits in params.value are cleared */ + EC_HOST_EVENT_CLEAR, +}; + +enum ec_host_event_mask_type { + + /* Main host event copy */ + EC_HOST_EVENT_MAIN, + + /* Copy B of host events */ + EC_HOST_EVENT_B, + + /* SCI Mask */ + EC_HOST_EVENT_SCI_MASK, + + /* SMI Mask */ + EC_HOST_EVENT_SMI_MASK, + + /* Mask of events that should be always reported in hostevents */ + EC_HOST_EVENT_ALWAYS_REPORT_MASK, + + /* Active wake mask */ + EC_HOST_EVENT_ACTIVE_WAKE_MASK, + + /* Lazy wake mask for S0ix */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S0IX, + + /* Lazy wake mask for S3 */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S3, + + /* Lazy wake mask for S5 */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S5, +}; + +#define EC_CMD_HOST_EVENT 0x00A4 /*****************************************************************************/ /* Switch commands */ /* Enable/disable LCD backlight */ -#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90 +#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x0090 struct ec_params_switch_enable_backlight { uint8_t enabled; -} __packed; +} __ec_align1; /* Enable/disable WLAN/Bluetooth */ -#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91 +#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x0091 #define EC_VER_SWITCH_ENABLE_WIRELESS 1 /* Version 0 params; no response */ struct ec_params_switch_enable_wireless_v0 { uint8_t enabled; -} __packed; +} __ec_align1; /* Version 1 params */ struct ec_params_switch_enable_wireless_v1 { @@ -2330,7 +3665,7 @@ struct ec_params_switch_enable_wireless_v1 { /* Which flags to copy from suspend_flags */ uint8_t suspend_mask; -} __packed; +} __ec_align1; /* Version 1 response */ struct ec_response_switch_enable_wireless_v1 { @@ -2339,55 +3674,56 @@ struct ec_response_switch_enable_wireless_v1 { /* Flags to leave enabled in S3 */ uint8_t suspend_flags; -} __packed; +} __ec_align1; /*****************************************************************************/ /* GPIO commands. Only available on EC if write protect has been disabled. */ /* Set GPIO output value */ -#define EC_CMD_GPIO_SET 0x92 +#define EC_CMD_GPIO_SET 0x0092 struct ec_params_gpio_set { char name[32]; uint8_t val; -} __packed; +} __ec_align1; /* Get GPIO value */ -#define EC_CMD_GPIO_GET 0x93 +#define EC_CMD_GPIO_GET 0x0093 /* Version 0 of input params and response */ struct ec_params_gpio_get { char name[32]; -} __packed; +} __ec_align1; + struct ec_response_gpio_get { uint8_t val; -} __packed; +} __ec_align1; /* Version 1 of input params and response */ struct ec_params_gpio_get_v1 { uint8_t subcmd; union { - struct { + struct __ec_align1 { char name[32]; } get_value_by_name; - struct { + struct __ec_align1 { uint8_t index; } get_info; }; -} __packed; +} __ec_align1; struct ec_response_gpio_get_v1 { union { - struct { + struct __ec_align1 { uint8_t val; } get_value_by_name, get_count; - struct { + struct __ec_todo_unpacked { uint8_t val; char name[32]; uint32_t flags; } get_info; }; -} __packed; +} __ec_todo_packed; enum gpio_get_subcmd { EC_GPIO_GET_BY_NAME = 0, @@ -2399,25 +3735,28 @@ enum gpio_get_subcmd { /* I2C commands. Only available when flash write protect is unlocked. */ /* - * TODO(crosbug.com/p/23570): These commands are deprecated, and will be - * removed soon. Use EC_CMD_I2C_XFER instead. + * CAUTION: These commands are deprecated, and are not supported anymore in EC + * builds >= 8398.0.0 (see crosbug.com/p/23570). + * + * Use EC_CMD_I2C_PASSTHRU instead. 
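A sketch of the version 1 EC_CMD_GPIO_GET interface above, reading a signal by name; per the comment above, the GPIO commands are only available when the EC's write protect has been disabled. The ec_command() transport helper is hypothetical.

#include <string.h>

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_read_gpio(const char *name, uint8_t *val)
{
        struct ec_params_gpio_get_v1 p = { .subcmd = EC_GPIO_GET_BY_NAME };
        struct ec_response_gpio_get_v1 r;
        int ret;

        strncpy(p.get_value_by_name.name, name,
                sizeof(p.get_value_by_name.name) - 1);

        ret = ec_command(EC_CMD_GPIO_GET, 1, &p, sizeof(p), &r, sizeof(r));
        if (ret < 0)
                return ret;

        *val = r.get_value_by_name.val;
        return 0;
}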
*/ /* Read I2C bus */ -#define EC_CMD_I2C_READ 0x94 +#define EC_CMD_I2C_READ 0x0094 struct ec_params_i2c_read { uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t read_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; -} __packed; +} __ec_align_size1; + struct ec_response_i2c_read { uint16_t data; -} __packed; +} __ec_align2; /* Write I2C bus */ -#define EC_CMD_I2C_WRITE 0x95 +#define EC_CMD_I2C_WRITE 0x0095 struct ec_params_i2c_write { uint16_t data; @@ -2425,7 +3764,7 @@ struct ec_params_i2c_write { uint8_t write_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; -} __packed; +} __ec_align_size1; /*****************************************************************************/ /* Charge state commands. Only available when flash write protect unlocked. */ @@ -2433,7 +3772,7 @@ struct ec_params_i2c_write { /* Force charge state machine to stop charging the battery or force it to * discharge the battery. */ -#define EC_CMD_CHARGE_CONTROL 0x96 +#define EC_CMD_CHARGE_CONTROL 0x0096 #define EC_VER_CHARGE_CONTROL 1 enum ec_charge_control_mode { @@ -2444,13 +3783,12 @@ enum ec_charge_control_mode { struct ec_params_charge_control { uint32_t mode; /* enum charge_control_mode */ -} __packed; +} __ec_align4; /*****************************************************************************/ -/* Console commands. Only available when flash write protect is unlocked. */ /* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */ -#define EC_CMD_CONSOLE_SNAPSHOT 0x97 +#define EC_CMD_CONSOLE_SNAPSHOT 0x0097 /* * Read data from the saved snapshot. If the subcmd parameter is @@ -2464,7 +3802,7 @@ struct ec_params_charge_control { * Response is null-terminated string. Empty string, if there is no more * remaining output. */ -#define EC_CMD_CONSOLE_READ 0x98 +#define EC_CMD_CONSOLE_READ 0x0098 enum ec_console_read_subcmd { CONSOLE_READ_NEXT = 0, @@ -2473,7 +3811,7 @@ enum ec_console_read_subcmd { struct ec_params_console_read_v1 { uint8_t subcmd; /* enum ec_console_read_subcmd */ -} __packed; +} __ec_align1; /*****************************************************************************/ @@ -2484,14 +3822,13 @@ struct ec_params_console_read_v1 { * EC_RES_SUCCESS if the command was successful. * EC_RES_ERROR if the cut off command failed. */ +#define EC_CMD_BATTERY_CUT_OFF 0x0099 -#define EC_CMD_BATTERY_CUT_OFF 0x99 - -#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0) +#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN BIT(0) struct ec_params_battery_cutoff { uint8_t flags; -} __packed; +} __ec_align1; /*****************************************************************************/ /* USB port mux control. */ @@ -2499,11 +3836,11 @@ struct ec_params_battery_cutoff { /* * Switch USB mux or return to automatic switching. */ -#define EC_CMD_USB_MUX 0x9a +#define EC_CMD_USB_MUX 0x009A struct ec_params_usb_mux { uint8_t mux; -} __packed; +} __ec_align1; /*****************************************************************************/ /* LDOs / FETs control. */ @@ -2516,25 +3853,25 @@ enum ec_ldo_state { /* * Switch on/off a LDO. */ -#define EC_CMD_LDO_SET 0x9b +#define EC_CMD_LDO_SET 0x009B struct ec_params_ldo_set { uint8_t index; uint8_t state; -} __packed; +} __ec_align1; /* * Get LDO state. */ -#define EC_CMD_LDO_GET 0x9c +#define EC_CMD_LDO_GET 0x009C struct ec_params_ldo_get { uint8_t index; -} __packed; +} __ec_align1; struct ec_response_ldo_get { uint8_t state; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Power info. 
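A sketch of draining the EC console with the snapshot/read pair defined above: take a snapshot, then issue CONSOLE_READ_NEXT until an empty string comes back. The ec_command() transport helper is hypothetical, the response buffer size here is arbitrary, and the command version carrying the subcmd parameter is assumed to be 1 (matching the _v1 params structure).

#include <stdio.h>

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_dump_ec_console(void)
{
        struct ec_params_console_read_v1 p = { .subcmd = CONSOLE_READ_NEXT };
        char chunk[240];        /* arbitrary size for this sketch */
        int ret;

        ret = ec_command(EC_CMD_CONSOLE_SNAPSHOT, 0, NULL, 0, NULL, 0);
        if (ret < 0)
                return ret;

        for (;;) {
                ret = ec_command(EC_CMD_CONSOLE_READ, 1, &p, sizeof(p),
                                 chunk, sizeof(chunk));
                if (ret < 0)
                        return ret;
                if (chunk[0] == '\0')   /* empty string: snapshot fully read */
                        break;
                fputs(chunk, stdout);
        }

        return 0;
}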
*/ @@ -2542,7 +3879,7 @@ struct ec_response_ldo_get { /* * Get power info. */ -#define EC_CMD_POWER_INFO 0x9d +#define EC_CMD_POWER_INFO 0x009D struct ec_response_power_info { uint32_t usb_dev_type; @@ -2550,21 +3887,21 @@ struct ec_response_power_info { uint16_t voltage_system; uint16_t current_system; uint16_t usb_current_limit; -} __packed; +} __ec_align4; /*****************************************************************************/ /* I2C passthru command */ -#define EC_CMD_I2C_PASSTHRU 0x9e +#define EC_CMD_I2C_PASSTHRU 0x009E /* Read data; if not present, message is a write */ -#define EC_I2C_FLAG_READ (1 << 15) +#define EC_I2C_FLAG_READ BIT(15) /* Mask for address */ #define EC_I2C_ADDR_MASK 0x3ff -#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */ -#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */ +#define EC_I2C_STATUS_NAK BIT(0) /* Transfer was not acknowledged */ +#define EC_I2C_STATUS_TIMEOUT BIT(1) /* Timeout during transfer */ /* Any error */ #define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) @@ -2572,49 +3909,49 @@ struct ec_response_power_info { struct ec_params_i2c_passthru_msg { uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ uint16_t len; /* Number of bytes to read or write */ -} __packed; +} __ec_align2; struct ec_params_i2c_passthru { uint8_t port; /* I2C port number */ uint8_t num_msgs; /* Number of messages */ struct ec_params_i2c_passthru_msg msg[]; /* Data to write for all messages is concatenated here */ -} __packed; +} __ec_align2; struct ec_response_i2c_passthru { uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */ uint8_t num_msgs; /* Number of messages processed */ uint8_t data[]; /* Data read by messages concatenated here */ -} __packed; +} __ec_align1; /*****************************************************************************/ /* Power button hang detect */ -#define EC_CMD_HANG_DETECT 0x9f +#define EC_CMD_HANG_DETECT 0x009F /* Reasons to start hang detection timer */ /* Power button pressed */ -#define EC_HANG_START_ON_POWER_PRESS (1 << 0) +#define EC_HANG_START_ON_POWER_PRESS BIT(0) /* Lid closed */ -#define EC_HANG_START_ON_LID_CLOSE (1 << 1) +#define EC_HANG_START_ON_LID_CLOSE BIT(1) /* Lid opened */ -#define EC_HANG_START_ON_LID_OPEN (1 << 2) +#define EC_HANG_START_ON_LID_OPEN BIT(2) /* Start of AP S3->S0 transition (booting or resuming from suspend) */ -#define EC_HANG_START_ON_RESUME (1 << 3) +#define EC_HANG_START_ON_RESUME BIT(3) /* Reasons to cancel hang detection */ /* Power button released */ -#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8) +#define EC_HANG_STOP_ON_POWER_RELEASE BIT(8) /* Any host command from AP received */ -#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9) +#define EC_HANG_STOP_ON_HOST_COMMAND BIT(9) /* Stop on end of AP S0->S3 transition (suspending or shutting down) */ -#define EC_HANG_STOP_ON_SUSPEND (1 << 10) +#define EC_HANG_STOP_ON_SUSPEND BIT(10) /* * If this flag is set, all the other fields are ignored, and the hang detect @@ -2622,14 +3959,14 @@ struct ec_response_i2c_passthru { * without reconfiguring any of the other hang detect settings. Note that * you must previously have configured the timeouts. */ -#define EC_HANG_START_NOW (1 << 30) +#define EC_HANG_START_NOW BIT(30) /* * If this flag is set, all the other fields are ignored (including * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer * without reconfiguring any of the other hang detect settings. 
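A sketch of laying out an EC_CMD_I2C_PASSTHRU request for a common register read (one write message carrying the register index, one read message), showing how the write payload is concatenated after the message headers as described above. The buffer handling is illustrative only.

/* returns the number of request bytes to send, or -1 if buf is too small */
static int example_build_i2c_reg_read(uint8_t *buf, unsigned int buflen,
                                      uint8_t port, uint16_t addr, uint8_t reg)
{
        struct ec_params_i2c_passthru *p = (struct ec_params_i2c_passthru *)buf;
        struct ec_params_i2c_passthru_msg *msg = p->msg;
        uint8_t *payload = (uint8_t *)&msg[2];  /* write data follows the headers */
        unsigned int needed = sizeof(*p) + 2 * sizeof(*msg) + 1;

        if (buflen < needed)
                return -1;

        p->port = port;
        p->num_msgs = 2;

        msg[0].addr_flags = addr;                       /* write message */
        msg[0].len = 1;
        msg[1].addr_flags = addr | EC_I2C_FLAG_READ;    /* read message */
        msg[1].len = 2;

        payload[0] = reg;                               /* data for msg[0] */

        return (int)needed;
}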
*/ -#define EC_HANG_STOP_NOW (1 << 31) +#define EC_HANG_STOP_NOW BIT(31) struct ec_params_hang_detect { /* Flags; see EC_HANG_* */ @@ -2640,7 +3977,7 @@ struct ec_params_hang_detect { /* Timeout in msec before generating warm reboot, if enabled */ uint16_t warm_reboot_timeout_msec; -} __packed; +} __ec_align4; /*****************************************************************************/ /* Commands for battery charging */ @@ -2649,7 +3986,7 @@ struct ec_params_hang_detect { * This is the single catch-all host command to exchange data regarding the * charge state machine (v2 and up). */ -#define EC_CMD_CHARGE_STATE 0xa0 +#define EC_CMD_CHARGE_STATE 0x00A0 /* Subcommands for this host command */ enum charge_state_command { @@ -2669,6 +4006,11 @@ enum charge_state_params { CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ CS_PARAM_CHG_STATUS, /* charger-specific status */ CS_PARAM_CHG_OPTION, /* charger-specific options */ + CS_PARAM_LIMIT_POWER, /* + * Check if power is limited due to + * low battery and / or a weak external + * charger. READ ONLY. + */ /* How many so far? */ CS_NUM_BASE_PARAMS, @@ -2676,30 +4018,39 @@ enum charge_state_params { CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, + /* Range for CONFIG_CHARGE_STATE_DEBUG params */ + CS_PARAM_DEBUG_MIN = 0x20000, + CS_PARAM_DEBUG_CTL_MODE = 0x20000, + CS_PARAM_DEBUG_MANUAL_MODE, + CS_PARAM_DEBUG_SEEMS_DEAD, + CS_PARAM_DEBUG_SEEMS_DISCONNECTED, + CS_PARAM_DEBUG_BATT_REMOVED, + CS_PARAM_DEBUG_MANUAL_CURRENT, + CS_PARAM_DEBUG_MANUAL_VOLTAGE, + CS_PARAM_DEBUG_MAX = 0x2ffff, + /* Other custom param ranges go here... */ }; struct ec_params_charge_state { uint8_t cmd; /* enum charge_state_command */ union { - struct { - /* no args */ - } get_state; + /* get_state has no args */ - struct { + struct __ec_todo_unpacked { uint32_t param; /* enum charge_state_param */ } get_param; - struct { + struct __ec_todo_unpacked { uint32_t param; /* param to set */ uint32_t value; /* value to set */ } set_param; }; -} __packed; +} __ec_todo_packed; struct ec_response_charge_state { union { - struct { + struct __ec_align4 { int ac; int chg_voltage; int chg_current; @@ -2707,24 +4058,23 @@ struct ec_response_charge_state { int batt_state_of_charge; } get_state; - struct { + struct __ec_align4 { uint32_t value; } get_param; - struct { - /* no return values */ - } set_param; + + /* set_param returns no args */ }; -} __packed; +} __ec_align4; /* * Set maximum battery charging current. */ -#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1 +#define EC_CMD_CHARGE_CURRENT_LIMIT 0x00A1 struct ec_params_current_limit { uint32_t limit; /* in mA */ -} __packed; +} __ec_align4; /* * Set maximum external voltage / current. @@ -2735,23 +4085,69 @@ struct ec_params_current_limit { struct ec_params_external_power_limit_v1 { uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */ uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */ -} __packed; +} __ec_align2; #define EC_POWER_LIMIT_NONE 0xffff +/* + * Set maximum voltage & current of a dedicated charge port + */ +#define EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT 0x00A3 + +struct ec_params_dedicated_charger_limit { + uint16_t current_lim; /* in mA */ + uint16_t voltage_lim; /* in mV */ +} __ec_align2; + +/*****************************************************************************/ +/* Hibernate/Deep Sleep Commands */ + +/* Set the delay before going into hibernation. 
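A small sketch of the dedicated charge port limit command defined above; the units are the milliamp/millivolt values documented in the structure, the command version is assumed to be 0, and ec_command() is a hypothetical transport helper.

extern int ec_command(unsigned int cmd, unsigned int version, const void *out,
                      unsigned int outsize, void *in, unsigned int insize); /* hypothetical transport */

static int example_limit_dedicated_charger(uint16_t ma, uint16_t mv)
{
        struct ec_params_dedicated_charger_limit p = {
                .current_lim = ma,      /* in mA */
                .voltage_lim = mv,      /* in mV */
        };

        return ec_command(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT, 0,
                          &p, sizeof(p), NULL, 0);
}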
*/ +#define EC_CMD_HIBERNATION_DELAY 0x00A8 + +struct ec_params_hibernation_delay { + /* + * Seconds to wait in G3 before hibernate. Pass in 0 to read the + * current settings without changing them. + */ + uint32_t seconds; +} __ec_align4; + +struct ec_response_hibernation_delay { + /* + * The current time in seconds in which the system has been in the G3 + * state. This value is reset if the EC transitions out of G3. + */ + uint32_t time_g3; + + /* + * The current time remaining in seconds until the EC should hibernate. + * This value is also reset if the EC transitions out of G3. + */ + uint32_t time_remaining; + + /* + * The current time in seconds that the EC should wait in G3 before + * hibernating. + */ + uint32_t hibernate_delay; +} __ec_align4; + /* Inform the EC when entering a sleep state */ -#define EC_CMD_HOST_SLEEP_EVENT 0xa9 +#define EC_CMD_HOST_SLEEP_EVENT 0x00A9 enum host_sleep_event { HOST_SLEEP_EVENT_S3_SUSPEND = 1, HOST_SLEEP_EVENT_S3_RESUME = 2, HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, - HOST_SLEEP_EVENT_S0IX_RESUME = 4 + HOST_SLEEP_EVENT_S0IX_RESUME = 4, + /* S3 suspend with additional enabled wake sources */ + HOST_SLEEP_EVENT_S3_WAKEABLE_SUSPEND = 5, }; struct ec_params_host_sleep_event { uint8_t sleep_event; -} __packed; +} __ec_align1; /* * Use a default timeout value (CONFIG_SLEEP_TIMEOUT_MS) for detecting sleep @@ -2782,7 +4178,7 @@ struct ec_params_host_sleep_event_v1 { /* No parameters for non-suspend messages. */ }; -} __packed; +} __ec_align2; /* A timeout occurred when this bit is set */ #define EC_HOST_RESUME_SLEEP_TIMEOUT 0x80000000 @@ -2808,42 +4204,72 @@ struct ec_response_host_sleep_event_v1 { /* No response fields for non-resume messages. */ }; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* Device events */ +#define EC_CMD_DEVICE_EVENT 0x00AA + +enum ec_device_event { + EC_DEVICE_EVENT_TRACKPAD, + EC_DEVICE_EVENT_DSP, + EC_DEVICE_EVENT_WIFI, +}; + +enum ec_device_event_param { + /* Get and clear pending device events */ + EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS, + /* Get device event mask */ + EC_DEVICE_EVENT_PARAM_GET_ENABLED_EVENTS, + /* Set device event mask */ + EC_DEVICE_EVENT_PARAM_SET_ENABLED_EVENTS, +}; + +#define EC_DEVICE_EVENT_MASK(event_code) BIT(event_code % 32) + +struct ec_params_device_event { + uint32_t event_mask; + uint8_t param; +} __ec_align_size1; + +struct ec_response_device_event { + uint32_t event_mask; +} __ec_align4; /*****************************************************************************/ /* Smart battery pass-through */ /* Get / Set 16-bit smart battery registers */ -#define EC_CMD_SB_READ_WORD 0xb0 -#define EC_CMD_SB_WRITE_WORD 0xb1 +#define EC_CMD_SB_READ_WORD 0x00B0 +#define EC_CMD_SB_WRITE_WORD 0x00B1 /* Get / Set string smart battery parameters * formatted as SMBUS "block". 
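 *
 * As a small illustration (the parameter structs follow below), reading a
 * string parameter uses EC_CMD_SB_READ_BLOCK with the register offset as
 * the only argument, and the response carries the raw 32-byte SMBUS block:
 *
 *	struct ec_params_sb_rd req = { .reg = 0x20 };	/* offset is only an example */
 *	struct ec_response_sb_rd_block resp;		/* resp.data[32] */
 *
 * Consult the battery's Smart Battery Data Specification register map for
 * the offsets that are actually valid.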
*/ -#define EC_CMD_SB_READ_BLOCK 0xb2 -#define EC_CMD_SB_WRITE_BLOCK 0xb3 +#define EC_CMD_SB_READ_BLOCK 0x00B2 +#define EC_CMD_SB_WRITE_BLOCK 0x00B3 struct ec_params_sb_rd { uint8_t reg; -} __packed; +} __ec_align1; struct ec_response_sb_rd_word { uint16_t value; -} __packed; +} __ec_align2; struct ec_params_sb_wr_word { uint8_t reg; uint16_t value; -} __packed; +} __ec_align1; struct ec_response_sb_rd_block { uint8_t data[32]; -} __packed; +} __ec_align1; struct ec_params_sb_wr_block { uint8_t reg; uint16_t data[32]; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Battery vendor parameters @@ -2854,7 +4280,7 @@ struct ec_params_sb_wr_block { * requested value. */ -#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4 +#define EC_CMD_BATTERY_VENDOR_PARAM 0x00B4 enum ec_battery_vendor_param_mode { BATTERY_VENDOR_PARAM_MODE_GET = 0, @@ -2865,16 +4291,187 @@ struct ec_params_battery_vendor_param { uint32_t param; uint32_t value; uint8_t mode; -} __packed; +} __ec_align_size1; struct ec_response_battery_vendor_param { uint32_t value; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* + * Smart Battery Firmware Update Commands + */ +#define EC_CMD_SB_FW_UPDATE 0x00B5 + +enum ec_sb_fw_update_subcmd { + EC_SB_FW_UPDATE_PREPARE = 0x0, + EC_SB_FW_UPDATE_INFO = 0x1, /*query sb info */ + EC_SB_FW_UPDATE_BEGIN = 0x2, /*check if protected */ + EC_SB_FW_UPDATE_WRITE = 0x3, /*check if protected */ + EC_SB_FW_UPDATE_END = 0x4, + EC_SB_FW_UPDATE_STATUS = 0x5, + EC_SB_FW_UPDATE_PROTECT = 0x6, + EC_SB_FW_UPDATE_MAX = 0x7, +}; + +#define SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE 32 +#define SB_FW_UPDATE_CMD_STATUS_SIZE 2 +#define SB_FW_UPDATE_CMD_INFO_SIZE 8 + +struct ec_sb_fw_update_header { + uint16_t subcmd; /* enum ec_sb_fw_update_subcmd */ + uint16_t fw_id; /* firmware id */ +} __ec_align4; + +struct ec_params_sb_fw_update { + struct ec_sb_fw_update_header hdr; + union { + /* EC_SB_FW_UPDATE_PREPARE = 0x0 */ + /* EC_SB_FW_UPDATE_INFO = 0x1 */ + /* EC_SB_FW_UPDATE_BEGIN = 0x2 */ + /* EC_SB_FW_UPDATE_END = 0x4 */ + /* EC_SB_FW_UPDATE_STATUS = 0x5 */ + /* EC_SB_FW_UPDATE_PROTECT = 0x6 */ + /* Those have no args */ + + /* EC_SB_FW_UPDATE_WRITE = 0x3 */ + struct __ec_align4 { + uint8_t data[SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE]; + } write; + }; +} __ec_align4; + +struct ec_response_sb_fw_update { + union { + /* EC_SB_FW_UPDATE_INFO = 0x1 */ + struct __ec_align1 { + uint8_t data[SB_FW_UPDATE_CMD_INFO_SIZE]; + } info; + + /* EC_SB_FW_UPDATE_STATUS = 0x5 */ + struct __ec_align1 { + uint8_t data[SB_FW_UPDATE_CMD_STATUS_SIZE]; + } status; + }; +} __ec_align1; + +/* + * Entering Verified Boot Mode Command + * Default mode is VBOOT_MODE_NORMAL if EC did not receive this command. + * Valid Modes are: normal, developer, and recovery. + */ +#define EC_CMD_ENTERING_MODE 0x00B6 + +struct ec_params_entering_mode { + int vboot_mode; +} __ec_align4; + +#define VBOOT_MODE_NORMAL 0 +#define VBOOT_MODE_DEVELOPER 1 +#define VBOOT_MODE_RECOVERY 2 + +/*****************************************************************************/ +/* + * I2C passthru protection command: Protects I2C tunnels against access on + * certain addresses (board-specific). 
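 *
 * A minimal sketch (the sub-commands and parameter struct are defined just
 * below): the AP can query whether a tunnel is locked and then lock it once
 * raw access is no longer needed:
 *
 *	struct ec_params_i2c_passthru_protect query = {
 *		.subcmd = EC_CMD_I2C_PASSTHRU_PROTECT_STATUS,
 *		.port = 0,	/* port number is board-specific */
 *	};
 *	struct ec_params_i2c_passthru_protect lock = {
 *		.subcmd = EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE,
 *		.port = 0,
 *	};
 *
 * The status response is a single byte: 0 for unlocked, 1 for locked.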
+ */ +#define EC_CMD_I2C_PASSTHRU_PROTECT 0x00B7 + +enum ec_i2c_passthru_protect_subcmd { + EC_CMD_I2C_PASSTHRU_PROTECT_STATUS = 0x0, + EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE = 0x1, +}; + +struct ec_params_i2c_passthru_protect { + uint8_t subcmd; + uint8_t port; /* I2C port number */ +} __ec_align1; + +struct ec_response_i2c_passthru_protect { + uint8_t status; /* Status flags (0: unlocked, 1: locked) */ +} __ec_align1; + + +/*****************************************************************************/ +/* + * HDMI CEC commands + * + * These commands are for sending and receiving message via HDMI CEC + */ + +#define MAX_CEC_MSG_LEN 16 + +/* CEC message from the AP to be written on the CEC bus */ +#define EC_CMD_CEC_WRITE_MSG 0x00B8 + +/** + * struct ec_params_cec_write - Message to write to the CEC bus + * @msg: message content to write to the CEC bus + */ +struct ec_params_cec_write { + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + +/* Set various CEC parameters */ +#define EC_CMD_CEC_SET 0x00BA + +/** + * struct ec_params_cec_set - CEC parameters set + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC + * or 1 to enable CEC functionality, in case cmd is + * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical + * address between 0 and 15 or 0xff to unregister + */ +struct ec_params_cec_set { + uint8_t cmd; /* enum cec_command */ + uint8_t val; +} __ec_align1; + +/* Read various CEC parameters */ +#define EC_CMD_CEC_GET 0x00BB + +/** + * struct ec_params_cec_get - CEC parameters get + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + */ +struct ec_params_cec_get { + uint8_t cmd; /* enum cec_command */ +} __ec_align1; + +/** + * struct ec_response_cec_get - CEC parameters get response + * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is + * disabled or 1 if CEC functionality is enabled, + * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the + * configured logical address between 0 and 15 or 0xff if unregistered + */ +struct ec_response_cec_get { + uint8_t val; +} __ec_align1; + +/* CEC parameters command */ +enum cec_command { + /* CEC reading, writing and events enable */ + CEC_CMD_ENABLE, + /* CEC logical address */ + CEC_CMD_LOGICAL_ADDRESS, +}; + +/* Events from CEC to AP */ +enum mkbp_cec_event { + /* Outgoing message was acknowledged by a follower */ + EC_MKBP_CEC_SEND_OK = BIT(0), + /* Outgoing message was not acknowledged */ + EC_MKBP_CEC_SEND_FAILED = BIT(1), +}; /*****************************************************************************/ + /* Commands for I2S recording on audio codec. */ #define EC_CMD_CODEC_I2S 0x00BC +#define EC_WOV_I2S_SAMPLE_RATE 48000 enum ec_codec_i2s_subcmd { EC_CODEC_SET_SAMPLE_DEPTH = 0x0, @@ -2884,6 +4481,7 @@ enum ec_codec_i2s_subcmd { EC_CODEC_I2S_SET_CONFIG = 0x4, EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, EC_CODEC_I2S_SET_BCLK = 0x6, + EC_CODEC_I2S_SUBCMD_COUNT = 0x7, }; enum ec_sample_depth_value { @@ -2900,10 +4498,23 @@ enum ec_i2s_config { EC_DAI_FMT_PCM_TDM = 5, }; -struct ec_param_codec_i2s { - /* - * enum ec_codec_i2s_subcmd - */ +/* + * For subcommand EC_CODEC_GET_GAIN. 
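 *
 * The same two-byte left/right layout defined just below is also used when
 * setting the gain. A rough sketch with arbitrary values (0~43 per channel,
 * as noted in struct ec_param_codec_i2s further below):
 *
 *	struct ec_param_codec_i2s req = {
 *		.cmd = EC_CODEC_SET_GAIN,
 *		.gain = { .left = 20, .right = 20 },
 *	};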
+ */ +struct __ec_align1 ec_codec_i2s_gain { + uint8_t left; + uint8_t right; +}; + +struct __ec_todo_unpacked ec_param_codec_i2s_tdm { + int16_t ch0_delay; /* 0 to 496 */ + int16_t ch1_delay; /* -1 to 496 */ + uint8_t adjacent_to_ch0; + uint8_t adjacent_to_ch1; +}; + +struct __ec_todo_packed ec_param_codec_i2s { + /* enum ec_codec_i2s_subcmd */ uint8_t cmd; union { /* @@ -2916,10 +4527,7 @@ struct ec_param_codec_i2s { * EC_CODEC_SET_GAIN * Value should be 0~43 for both channels. */ - struct ec_param_codec_i2s_set_gain { - uint8_t left; - uint8_t right; - } __packed gain; + struct ec_codec_i2s_gain gain; /* * EC_CODEC_I2S_ENABLE @@ -2928,7 +4536,7 @@ struct ec_param_codec_i2s { uint8_t i2s_enable; /* - * EC_CODEC_I2S_SET_COFNIG + * EC_CODEC_I2S_SET_CONFIG * Value should be one of ec_i2s_config. */ uint8_t i2s_config; @@ -2937,33 +4545,15 @@ struct ec_param_codec_i2s { * EC_CODEC_I2S_SET_TDM_CONFIG * Value should be one of ec_i2s_config. */ - struct ec_param_codec_i2s_tdm { - /* - * 0 to 496 - */ - int16_t ch0_delay; - /* - * -1 to 496 - */ - int16_t ch1_delay; - uint8_t adjacent_to_ch0; - uint8_t adjacent_to_ch1; - } __packed tdm_param; + struct ec_param_codec_i2s_tdm tdm_param; /* * EC_CODEC_I2S_SET_BCLK */ uint32_t bclk; }; -} __packed; +}; -/* - * For subcommand EC_CODEC_GET_GAIN. - */ -struct ec_response_codec_gain { - uint8_t left; - uint8_t right; -} __packed; /*****************************************************************************/ /* System commands */ @@ -2972,27 +4562,29 @@ struct ec_response_codec_gain { * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't * necessarily reboot the EC. Rename to "image" or something similar? */ -#define EC_CMD_REBOOT_EC 0xd2 +#define EC_CMD_REBOOT_EC 0x00D2 /* Command */ enum ec_reboot_cmd { EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */ EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */ - EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */ + EC_REBOOT_JUMP_RW = 2, /* Jump to active RW without rebooting */ /* (command 3 was jump to RW-B) */ EC_REBOOT_COLD = 4, /* Cold-reboot */ EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ - EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */ + EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */ + EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */ }; /* Flags for ec_params_reboot_ec.reboot_flags */ -#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */ -#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */ +#define EC_REBOOT_FLAG_RESERVED0 BIT(0) /* Was recovery request */ +#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN BIT(1) /* Reboot after AP shutdown */ +#define EC_REBOOT_FLAG_SWITCH_RW_SLOT BIT(2) /* Switch RW slot */ struct ec_params_reboot_ec { uint8_t cmd; /* enum ec_reboot_cmd */ uint8_t flags; /* See EC_REBOOT_FLAG_* */ -} __packed; +} __ec_align1; /* * Get information on last EC panic. @@ -3000,196 +4592,7 @@ struct ec_params_reboot_ec { * Returns variable-length platform-dependent panic information. See panic.h * for details. */ -#define EC_CMD_GET_PANIC_INFO 0xd3 - -/*****************************************************************************/ -/* - * ACPI commands - * - * These are valid ONLY on the ACPI command/data port. - */ - -/* - * ACPI Read Embedded Controller - * - * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). 
- * - * Use the following sequence: - * - * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write address to EC_LPC_ADDR_ACPI_DATA - * - Wait for EC_LPC_CMDR_DATA bit to set - * - Read value from EC_LPC_ADDR_ACPI_DATA - */ -#define EC_CMD_ACPI_READ 0x80 - -/* - * ACPI Write Embedded Controller - * - * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). - * - * Use the following sequence: - * - * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write address to EC_LPC_ADDR_ACPI_DATA - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write value to EC_LPC_ADDR_ACPI_DATA - */ -#define EC_CMD_ACPI_WRITE 0x81 - -/* - * ACPI Query Embedded Controller - * - * This clears the lowest-order bit in the currently pending host events, and - * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, - * event 0x80000000 = 32), or 0 if no event was pending. - */ -#define EC_CMD_ACPI_QUERY_EVENT 0x84 - -/* Valid addresses in ACPI memory space, for read/write commands */ - -/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ -#define EC_ACPI_MEM_VERSION 0x00 -/* - * Test location; writing value here updates test compliment byte to (0xff - - * value). - */ -#define EC_ACPI_MEM_TEST 0x01 -/* Test compliment; writes here are ignored. */ -#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 - -/* Keyboard backlight brightness percent (0 - 100) */ -#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 -/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ -#define EC_ACPI_MEM_FAN_DUTY 0x04 - -/* - * DPTF temp thresholds. Any of the EC's temp sensors can have up to two - * independent thresholds attached to them. The current value of the ID - * register determines which sensor is affected by the THRESHOLD and COMMIT - * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme - * as the memory-mapped sensors. The COMMIT register applies those settings. - * - * The spec does not mandate any way to read back the threshold settings - * themselves, but when a threshold is crossed the AP needs a way to determine - * which sensor(s) are responsible. Each reading of the ID register clears and - * returns one sensor ID that has crossed one of its threshold (in either - * direction) since the last read. A value of 0xFF means "no new thresholds - * have tripped". Setting or enabling the thresholds for a sensor will clear - * the unread event count for that sensor. - */ -#define EC_ACPI_MEM_TEMP_ID 0x05 -#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 -#define EC_ACPI_MEM_TEMP_COMMIT 0x07 -/* - * Here are the bits for the COMMIT register: - * bit 0 selects the threshold index for the chosen sensor (0/1) - * bit 1 enables/disables the selected threshold (0 = off, 1 = on) - * Each write to the commit register affects one threshold. 
- */ -#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0) -#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1) -/* - * Example: - * - * Set the thresholds for sensor 2 to 50 C and 60 C: - * write 2 to [0x05] -- select temp sensor 2 - * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET - * write 0x2 to [0x07] -- enable threshold 0 with this value - * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET - * write 0x3 to [0x07] -- enable threshold 1 with this value - * - * Disable the 60 C threshold, leaving the 50 C threshold unchanged: - * write 2 to [0x05] -- select temp sensor 2 - * write 0x1 to [0x07] -- disable threshold 1 - */ - -/* DPTF battery charging current limit */ -#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 - -/* Charging limit is specified in 64 mA steps */ -#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 -/* Value to disable DPTF battery charging limit */ -#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff - -/* Current version of ACPI memory address space */ -#define EC_ACPI_MEM_VERSION_CURRENT 1 - - -/*****************************************************************************/ -/* - * HDMI CEC commands - * - * These commands are for sending and receiving message via HDMI CEC - */ -#define EC_MAX_CEC_MSG_LEN 16 - -/* CEC message from the AP to be written on the CEC bus */ -#define EC_CMD_CEC_WRITE_MSG 0x00B8 - -/** - * struct ec_params_cec_write - Message to write to the CEC bus - * @msg: message content to write to the CEC bus - */ -struct ec_params_cec_write { - uint8_t msg[EC_MAX_CEC_MSG_LEN]; -} __packed; - -/* Set various CEC parameters */ -#define EC_CMD_CEC_SET 0x00BA - -/** - * struct ec_params_cec_set - CEC parameters set - * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS - * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC - * or 1 to enable CEC functionality, in case cmd is CEC_CMD_LOGICAL_ADDRESS, - * this field encodes the requested logical address between 0 and 15 - * or 0xff to unregister - */ -struct ec_params_cec_set { - uint8_t cmd; /* enum cec_command */ - uint8_t val; -} __packed; - -/* Read various CEC parameters */ -#define EC_CMD_CEC_GET 0x00BB - -/** - * struct ec_params_cec_get - CEC parameters get - * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS - */ -struct ec_params_cec_get { - uint8_t cmd; /* enum cec_command */ -} __packed; - -/** - * struct ec_response_cec_get - CEC parameters get response - * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is - * disabled or 1 if CEC functionality is enabled, - * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the - * configured logical address between 0 and 15 or 0xff if unregistered - */ -struct ec_response_cec_get { - uint8_t val; -} __packed; - -/* CEC parameters command */ -enum ec_cec_command { - /* CEC reading, writing and events enable */ - CEC_CMD_ENABLE, - /* CEC logical address */ - CEC_CMD_LOGICAL_ADDRESS, -}; - -/* Events from CEC to AP */ -enum mkbp_cec_event { - /* Outgoing message was acknowledged by a follower */ - EC_MKBP_CEC_SEND_OK = BIT(0), - /* Outgoing message was not acknowledged */ - EC_MKBP_CEC_SEND_FAILED = BIT(1), -}; +#define EC_CMD_GET_PANIC_INFO 0x00D3 /*****************************************************************************/ /* @@ -3208,7 +4611,7 @@ enum mkbp_cec_event { * * Use EC_CMD_REBOOT_EC to reboot the EC more politely. 
*/ -#define EC_CMD_REBOOT 0xd1 /* Think "die" */ +#define EC_CMD_REBOOT 0x00D1 /* Think "die" */ /* * Resend last response (not supported on LPC). @@ -3217,7 +4620,7 @@ enum mkbp_cec_event { * there was no previous command, or the previous command's response was too * big to save. */ -#define EC_CMD_RESEND_RESPONSE 0xdb +#define EC_CMD_RESEND_RESPONSE 0x00DB /* * This header byte on a command indicate version 0. Any header byte less @@ -3229,9 +4632,7 @@ enum mkbp_cec_event { * * The old EC interface must not use commands 0xdc or higher. */ -#define EC_CMD_VERSION0 0xdc - -#endif /* !__ACPI__ */ +#define EC_CMD_VERSION0 0x00DC /*****************************************************************************/ /* @@ -3241,21 +4642,56 @@ enum mkbp_cec_event { */ /* EC to PD MCU exchange status command */ -#define EC_CMD_PD_EXCHANGE_STATUS 0x100 +#define EC_CMD_PD_EXCHANGE_STATUS 0x0100 +#define EC_VER_PD_EXCHANGE_STATUS 2 + +enum pd_charge_state { + PD_CHARGE_NO_CHANGE = 0, /* Don't change charge state */ + PD_CHARGE_NONE, /* No charging allowed */ + PD_CHARGE_5V, /* 5V charging only */ + PD_CHARGE_MAX /* Charge at max voltage */ +}; /* Status of EC being sent to PD */ +#define EC_STATUS_HIBERNATING BIT(0) + struct ec_params_pd_status { - int8_t batt_soc; /* battery state of charge */ -} __packed; + uint8_t status; /* EC status */ + int8_t batt_soc; /* battery state of charge */ + uint8_t charge_state; /* charging state (from enum pd_charge_state) */ +} __ec_align1; /* Status of PD being sent back to EC */ +#define PD_STATUS_HOST_EVENT BIT(0) /* Forward host event to AP */ +#define PD_STATUS_IN_RW BIT(1) /* Running RW image */ +#define PD_STATUS_JUMPED_TO_IMAGE BIT(2) /* Current image was jumped to */ +#define PD_STATUS_TCPC_ALERT_0 BIT(3) /* Alert active in port 0 TCPC */ +#define PD_STATUS_TCPC_ALERT_1 BIT(4) /* Alert active in port 1 TCPC */ +#define PD_STATUS_TCPC_ALERT_2 BIT(5) /* Alert active in port 2 TCPC */ +#define PD_STATUS_TCPC_ALERT_3 BIT(6) /* Alert active in port 3 TCPC */ +#define PD_STATUS_EC_INT_ACTIVE (PD_STATUS_TCPC_ALERT_0 | \ + PD_STATUS_TCPC_ALERT_1 | \ + PD_STATUS_HOST_EVENT) struct ec_response_pd_status { - int8_t status; /* PD MCU status */ - uint32_t curr_lim_ma; /* input current limit */ -} __packed; + uint32_t curr_lim_ma; /* input current limit */ + uint16_t status; /* PD MCU status */ + int8_t active_charge_port; /* active charging port */ +} __ec_align_size1; + +/* AP to PD MCU host event status command, cleared on read */ +#define EC_CMD_PD_HOST_EVENT_STATUS 0x0104 + +/* PD MCU host event status bits */ +#define PD_EVENT_UPDATE_DEVICE BIT(0) +#define PD_EVENT_POWER_CHANGE BIT(1) +#define PD_EVENT_IDENTITY_RECEIVED BIT(2) +#define PD_EVENT_DATA_SWAP BIT(3) +struct ec_response_host_event_status { + uint32_t status; /* PD MCU host event status */ +} __ec_align4; /* Set USB type-C port role and muxes */ -#define EC_CMD_USB_PD_CONTROL 0x101 +#define EC_CMD_USB_PD_CONTROL 0x0101 enum usb_pd_control_role { USB_PD_CTRL_ROLE_NO_CHANGE = 0, @@ -3263,6 +4699,8 @@ enum usb_pd_control_role { USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, USB_PD_CTRL_ROLE_FORCE_SINK = 3, USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, + USB_PD_CTRL_ROLE_FREEZE = 5, + USB_PD_CTRL_ROLE_COUNT }; enum usb_pd_control_mux { @@ -3272,6 +4710,7 @@ enum usb_pd_control_mux { USB_PD_CTRL_MUX_DP = 3, USB_PD_CTRL_MUX_DOCK = 4, USB_PD_CTRL_MUX_AUTO = 5, + USB_PD_CTRL_MUX_COUNT }; enum usb_pd_control_swap { @@ -3287,11 +4726,11 @@ struct ec_params_usb_pd_control { uint8_t role; uint8_t mux; uint8_t swap; -} __packed; +} __ec_align1; 
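/*
 * A minimal illustrative request for EC_CMD_USB_PD_CONTROL (a sketch, not a
 * definitive recipe): set the mux policy on port 0 to automatic while
 * leaving the power role alone. The swap value of 0 is an assumption here
 * meaning "no swap requested"; the usb_pd_control_swap enumerators are not
 * listed in this hunk.
 *
 *	struct ec_params_usb_pd_control req = {
 *		.port = 0,
 *		.role = USB_PD_CTRL_ROLE_NO_CHANGE,
 *		.mux = USB_PD_CTRL_MUX_AUTO,
 *		.swap = 0,
 *	};
 *
 * See ec_response_usb_pd_control and its _v1/_v2 variants below for the
 * reply formats.
 */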
-#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ -#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ -#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ +#define PD_CTRL_RESP_ENABLED_COMMS BIT(0) /* Communication enabled */ +#define PD_CTRL_RESP_ENABLED_CONNECTED BIT(1) /* Device connected */ +#define PD_CTRL_RESP_ENABLED_PD_CAPABLE BIT(2) /* Partner is PD capable */ #define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */ #define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */ @@ -3301,28 +4740,54 @@ struct ec_params_usb_pd_control { #define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */ #define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powerd */ +struct ec_response_usb_pd_control { + uint8_t enabled; + uint8_t role; + uint8_t polarity; + uint8_t state; +} __ec_align1; + struct ec_response_usb_pd_control_v1 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; -} __packed; +} __ec_align1; + +/* Values representing usbc PD CC state */ +#define USBC_PD_CC_NONE 0 /* No accessory connected */ +#define USBC_PD_CC_NO_UFP 1 /* No UFP accessory connected */ +#define USBC_PD_CC_AUDIO_ACC 2 /* Audio accessory connected */ +#define USBC_PD_CC_DEBUG_ACC 3 /* Debug accessory connected */ +#define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ +#define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ + +struct ec_response_usb_pd_control_v2 { + uint8_t enabled; + uint8_t role; + uint8_t polarity; + char state[32]; + uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + /* CL:1500994 Current cable type */ + uint8_t reserved_cable_type; +} __ec_align1; -#define EC_CMD_USB_PD_PORTS 0x102 +#define EC_CMD_USB_PD_PORTS 0x0102 /* Maximum number of PD ports on a device, num_ports will be <= this */ #define EC_USB_PD_MAX_PORTS 8 struct ec_response_usb_pd_ports { uint8_t num_ports; -} __packed; +} __ec_align1; -#define EC_CMD_USB_PD_POWER_INFO 0x103 +#define EC_CMD_USB_PD_POWER_INFO 0x0103 #define PD_POWER_CHARGING_PORT 0xff struct ec_params_usb_pd_power_info { uint8_t port; -} __packed; +} __ec_align1; enum usb_chg_type { USB_CHG_TYPE_NONE, @@ -3335,6 +4800,7 @@ enum usb_chg_type { USB_CHG_TYPE_OTHER, USB_CHG_TYPE_VBUS, USB_CHG_TYPE_UNKNOWN, + USB_CHG_TYPE_DEDICATED, }; enum usb_power_roles { USB_PD_PORT_POWER_DISCONNECTED, @@ -3348,7 +4814,7 @@ struct usb_chg_measures { uint16_t voltage_now; uint16_t current_max; uint16_t current_lim; -} __packed; +} __ec_align2; struct ec_response_usb_pd_power_info { uint8_t role; @@ -3357,11 +4823,8 @@ struct ec_response_usb_pd_power_info { uint8_t reserved1; struct usb_chg_measures meas; uint32_t max_power; -} __packed; +} __ec_align4; -struct ec_params_usb_pd_info_request { - uint8_t port; -} __packed; /* * This command will return the number of USB PD charge port + the number @@ -3371,7 +4834,47 @@ struct ec_params_usb_pd_info_request { #define EC_CMD_CHARGE_PORT_COUNT 0x0105 struct ec_response_charge_port_count { uint8_t port_count; -} __packed; +} __ec_align1; + +/* Write USB-PD device FW */ +#define EC_CMD_USB_PD_FW_UPDATE 0x0110 + +enum usb_pd_fw_update_cmds { + USB_PD_FW_REBOOT, + USB_PD_FW_FLASH_ERASE, + USB_PD_FW_FLASH_WRITE, + USB_PD_FW_ERASE_SIG, +}; + +struct ec_params_usb_pd_fw_update { + uint16_t dev_id; + uint8_t cmd; + uint8_t port; + uint32_t size; /* Size to write in bytes */ + /* Followed by data to write */ +} __ec_align4; + +/* Write USB-PD Accessory RW_HASH table 
entry */ +#define EC_CMD_USB_PD_RW_HASH_ENTRY 0x0111 +/* RW hash is first 20 bytes of SHA-256 of RW section */ +#define PD_RW_HASH_SIZE 20 +struct ec_params_usb_pd_rw_hash_entry { + uint16_t dev_id; + uint8_t dev_rw_hash[PD_RW_HASH_SIZE]; + uint8_t reserved; /* + * For alignment of current_image + * TODO(rspangler) but it's not aligned! + * Should have been reserved[2]. + */ + uint32_t current_image; /* One of ec_current_image */ +} __ec_align1; + +/* Read USB-PD Accessory info */ +#define EC_CMD_USB_PD_DEV_INFO 0x0112 + +struct ec_params_usb_pd_info_request { + uint8_t port; +} __ec_align1; /* Read USB-PD Device discovery info */ #define EC_CMD_USB_PD_DISCOVERY 0x0113 @@ -3379,7 +4882,7 @@ struct ec_params_usb_pd_discovery_entry { uint16_t vid; /* USB-IF VID */ uint16_t pid; /* USB-IF PID */ uint8_t ptype; /* product type (hub,periph,cable,ama) */ -} __packed; +} __ec_align_size1; /* Override default charge behavior */ #define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114 @@ -3393,9 +4896,13 @@ enum usb_pd_override_ports { struct ec_params_charge_port_override { int16_t override_port; /* Override port# */ -} __packed; +} __ec_align2; -/* Read (and delete) one entry of PD event log */ +/* + * Read (and delete) one entry of PD event log. + * TODO(crbug.com/751742): Make this host command more generic to accommodate + * future non-PD logs that use the same internal EC event_log. + */ #define EC_CMD_PD_GET_LOG_ENTRY 0x0115 struct ec_response_pd_log { @@ -3404,7 +4911,7 @@ struct ec_response_pd_log { uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */ uint16_t data; /* type-defined data payload */ uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */ -} __packed; +} __ec_align4; /* The timestamp is the microsecond counter shifted to get about a ms. */ #define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */ @@ -3470,35 +4977,698 @@ struct mcdp_version { uint8_t major; uint8_t minor; uint16_t build; -} __packed; +} __ec_align4; struct mcdp_info { uint8_t family[2]; uint8_t chipid[2]; struct mcdp_version irom; struct mcdp_version fw; -} __packed; +} __ec_align4; /* struct mcdp_info field decoding */ #define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1]) #define MCDP_FAMILY(family) ((family[0] << 8) | family[1]) +/* Get/Set USB-PD Alternate mode info */ +#define EC_CMD_USB_PD_GET_AMODE 0x0116 +struct ec_params_usb_pd_get_mode_request { + uint16_t svid_idx; /* SVID index to get */ + uint8_t port; /* port */ +} __ec_align_size1; + +struct ec_params_usb_pd_get_mode_response { + uint16_t svid; /* SVID */ + uint16_t opos; /* Object Position */ + uint32_t vdo[6]; /* Mode VDOs */ +} __ec_align4; + +#define EC_CMD_USB_PD_SET_AMODE 0x0117 + +enum pd_mode_cmd { + PD_EXIT_MODE = 0, + PD_ENTER_MODE = 1, + /* Not a command. Do NOT remove. 
*/ + PD_MODE_CMD_COUNT, +}; + +struct ec_params_usb_pd_set_mode_request { + uint32_t cmd; /* enum pd_mode_cmd */ + uint16_t svid; /* SVID to set */ + uint8_t opos; /* Object Position */ + uint8_t port; /* port */ +} __ec_align4; + +/* Ask the PD MCU to record a log of a requested type */ +#define EC_CMD_PD_WRITE_LOG_ENTRY 0x0118 + +struct ec_params_pd_write_log_entry { + uint8_t type; /* event type : see PD_EVENT_xx above */ + uint8_t port; /* port#, or 0 for events unrelated to a given port */ +} __ec_align1; + + +/* Control USB-PD chip */ +#define EC_CMD_PD_CONTROL 0x0119 + +enum ec_pd_control_cmd { + PD_SUSPEND = 0, /* Suspend the PD chip (EC: stop talking to PD) */ + PD_RESUME, /* Resume the PD chip (EC: start talking to PD) */ + PD_RESET, /* Force reset the PD chip */ + PD_CONTROL_DISABLE, /* Disable further calls to this command */ + PD_CHIP_ON, /* Power on the PD chip */ +}; + +struct ec_params_pd_control { + uint8_t chip; /* chip id */ + uint8_t subcmd; +} __ec_align1; + /* Get info about USB-C SS muxes */ -#define EC_CMD_USB_PD_MUX_INFO 0x11a +#define EC_CMD_USB_PD_MUX_INFO 0x011A struct ec_params_usb_pd_mux_info { uint8_t port; /* USB-C port number */ -} __packed; +} __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED (1 << 0) -#define USB_PD_MUX_DP_ENABLED (1 << 1) -#define USB_PD_MUX_POLARITY_INVERTED (1 << 2) -#define USB_PD_MUX_HPD_IRQ (1 << 3) +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ -} __packed; +} __ec_align1; + +#define EC_CMD_PD_CHIP_INFO 0x011B + +struct ec_params_pd_chip_info { + uint8_t port; /* USB-C port number */ + uint8_t renew; /* Force renewal */ +} __ec_align1; + +struct ec_response_pd_chip_info { + uint16_t vendor_id; + uint16_t product_id; + uint16_t device_id; + union { + uint8_t fw_version_string[8]; + uint64_t fw_version_number; + }; +} __ec_align2; + +struct ec_response_pd_chip_info_v1 { + uint16_t vendor_id; + uint16_t product_id; + uint16_t device_id; + union { + uint8_t fw_version_string[8]; + uint64_t fw_version_number; + }; + union { + uint8_t min_req_fw_version_string[8]; + uint64_t min_req_fw_version_number; + }; +} __ec_align2; + +/* Run RW signature verification and get status */ +#define EC_CMD_RWSIG_CHECK_STATUS 0x011C + +struct ec_response_rwsig_check_status { + uint32_t status; +} __ec_align4; + +/* For controlling RWSIG task */ +#define EC_CMD_RWSIG_ACTION 0x011D + +enum rwsig_action { + RWSIG_ACTION_ABORT = 0, /* Abort RWSIG and prevent jumping */ + RWSIG_ACTION_CONTINUE = 1, /* Jump to RW immediately */ +}; + +struct ec_params_rwsig_action { + uint32_t action; +} __ec_align4; + +/* Run verification on a slot */ +#define EC_CMD_EFS_VERIFY 0x011E + +struct ec_params_efs_verify { + uint8_t region; /* enum ec_flash_region */ +} __ec_align1; + +/* + * Retrieve info from Cros Board Info store. Response is based on the data + * type. Integers return a uint32. Strings return a string, using the response + * size to determine how big it is. + */ +#define EC_CMD_GET_CROS_BOARD_INFO 0x011F +/* + * Write info into Cros Board Info on EEPROM. Write fails if the board has + * hardware write-protect enabled. 
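 *
 * The read counterpart, EC_CMD_GET_CROS_BOARD_INFO above, takes a tag and
 * flags (both defined below). As a rough sketch, fetching the SKU ID
 * straight from EEPROM rather than the cached copy looks like:
 *
 *	struct ec_params_get_cbi req = {
 *		.tag = CBI_TAG_SKU_ID,
 *		.flag = CBI_GET_RELOAD,
 *	};
 *
 * For an integer tag such as this one the response is a bare uint32; string
 * tags return a string whose length is given by the response size.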
+ */ +#define EC_CMD_SET_CROS_BOARD_INFO 0x0120 + +enum cbi_data_tag { + CBI_TAG_BOARD_VERSION = 0, /* uint32_t or smaller */ + CBI_TAG_OEM_ID = 1, /* uint32_t or smaller */ + CBI_TAG_SKU_ID = 2, /* uint32_t or smaller */ + CBI_TAG_DRAM_PART_NUM = 3, /* variable length ascii, nul terminated. */ + CBI_TAG_OEM_NAME = 4, /* variable length ascii, nul terminated. */ + CBI_TAG_MODEL_ID = 5, /* uint32_t or smaller */ + CBI_TAG_COUNT, +}; + +/* + * Flags to control read operation + * + * RELOAD: Invalidate cache and read data from EEPROM. Useful to verify + * write was successful without reboot. + */ +#define CBI_GET_RELOAD BIT(0) + +struct ec_params_get_cbi { + uint32_t tag; /* enum cbi_data_tag */ + uint32_t flag; /* CBI_GET_* */ +} __ec_align4; + +/* + * Flags to control write behavior. + * + * NO_SYNC: Makes EC update data in RAM but skip writing to EEPROM. It's + * useful when writing multiple fields in a row. + * INIT: Need to be set when creating a new CBI from scratch. All fields + * will be initialized to zero first. + */ +#define CBI_SET_NO_SYNC BIT(0) +#define CBI_SET_INIT BIT(1) + +struct ec_params_set_cbi { + uint32_t tag; /* enum cbi_data_tag */ + uint32_t flag; /* CBI_SET_* */ + uint32_t size; /* Data size */ + uint8_t data[]; /* For string and raw data */ +} __ec_align1; + +/* + * Information about resets of the AP by the EC and the EC's own uptime. + */ +#define EC_CMD_GET_UPTIME_INFO 0x0121 + +struct ec_response_uptime_info { + /* + * Number of milliseconds since the last EC boot. Sysjump resets + * typically do not restart the EC's time_since_boot epoch. + * + * WARNING: The EC's sense of time is much less accurate than the AP's + * sense of time, in both phase and frequency. This timebase is similar + * to CLOCK_MONOTONIC_RAW, but with 1% or more frequency error. + */ + uint32_t time_since_ec_boot_ms; + + /* + * Number of times the AP was reset by the EC since the last EC boot. + * Note that the AP may be held in reset by the EC during the initial + * boot sequence, such that the very first AP boot may count as more + * than one here. + */ + uint32_t ap_resets_since_ec_boot; + + /* + * The set of flags which describe the EC's most recent reset. See + * include/system.h RESET_FLAG_* for details. + */ + uint32_t ec_reset_flags; + + /* Empty log entries have both the cause and timestamp set to zero. */ + struct ap_reset_log_entry { + /* + * See include/chipset.h: enum chipset_{reset,shutdown}_reason + * for details. + */ + uint16_t reset_cause; + + /* Reserved for protocol growth. */ + uint16_t reserved; + + /* + * The time of the reset's assertion, in milliseconds since the + * last EC boot, in the same epoch as time_since_ec_boot_ms. + * Set to zero if the log entry is empty. + */ + uint32_t reset_time_ms; + } recent_ap_reset[4]; +} __ec_align4; + +/* + * Add entropy to the device secret (stored in the rollback region). + * + * Depending on the chip, the operation may take a long time (e.g. to erase + * flash), so the commands are asynchronous. + */ +#define EC_CMD_ADD_ENTROPY 0x0122 + +enum add_entropy_action { + /* Add entropy to the current secret. */ + ADD_ENTROPY_ASYNC = 0, + /* + * Add entropy, and also make sure that the previous secret is erased. + * (this can be implemented by adding entropy multiple times until + * all rolback blocks have been overwritten). + */ + ADD_ENTROPY_RESET_ASYNC = 1, + /* Read back result from the previous operation. 
*/ + ADD_ENTROPY_GET_RESULT = 2, +}; + +struct ec_params_rollback_add_entropy { + uint8_t action; +} __ec_align1; + +/* + * Perform a single read of a given ADC channel. + */ +#define EC_CMD_ADC_READ 0x0123 + +struct ec_params_adc_read { + uint8_t adc_channel; +} __ec_align1; + +struct ec_response_adc_read { + int32_t adc_value; +} __ec_align4; + +/* + * Read back rollback info + */ +#define EC_CMD_ROLLBACK_INFO 0x0124 + +struct ec_response_rollback_info { + int32_t id; /* Incrementing number to indicate which region to use. */ + int32_t rollback_min_version; + int32_t rw_rollback_version; +} __ec_align4; + + +/* Issue AP reset */ +#define EC_CMD_AP_RESET 0x0125 + +/*****************************************************************************/ +/* The command range 0x200-0x2FF is reserved for Rotor. */ + +/*****************************************************************************/ +/* + * Reserve a range of host commands for the CR51 firmware. + */ +#define EC_CMD_CR51_BASE 0x0300 +#define EC_CMD_CR51_LAST 0x03FF + +/*****************************************************************************/ +/* Fingerprint MCU commands: range 0x0400-0x040x */ + +/* Fingerprint SPI sensor passthru command: prototyping ONLY */ +#define EC_CMD_FP_PASSTHRU 0x0400 + +#define EC_FP_FLAG_NOT_COMPLETE 0x1 + +struct ec_params_fp_passthru { + uint16_t len; /* Number of bytes to write then read */ + uint16_t flags; /* EC_FP_FLAG_xxx */ + uint8_t data[]; /* Data to send */ +} __ec_align2; + +/* Configure the Fingerprint MCU behavior */ +#define EC_CMD_FP_MODE 0x0402 + +/* Put the sensor in its lowest power mode */ +#define FP_MODE_DEEPSLEEP BIT(0) +/* Wait to see a finger on the sensor */ +#define FP_MODE_FINGER_DOWN BIT(1) +/* Poll until the finger has left the sensor */ +#define FP_MODE_FINGER_UP BIT(2) +/* Capture the current finger image */ +#define FP_MODE_CAPTURE BIT(3) +/* Finger enrollment session on-going */ +#define FP_MODE_ENROLL_SESSION BIT(4) +/* Enroll the current finger image */ +#define FP_MODE_ENROLL_IMAGE BIT(5) +/* Try to match the current finger image */ +#define FP_MODE_MATCH BIT(6) +/* Reset and re-initialize the sensor. */ +#define FP_MODE_RESET_SENSOR BIT(7) +/* special value: don't change anything just read back current mode */ +#define FP_MODE_DONT_CHANGE BIT(31) + +#define FP_VALID_MODES (FP_MODE_DEEPSLEEP | \ + FP_MODE_FINGER_DOWN | \ + FP_MODE_FINGER_UP | \ + FP_MODE_CAPTURE | \ + FP_MODE_ENROLL_SESSION | \ + FP_MODE_ENROLL_IMAGE | \ + FP_MODE_MATCH | \ + FP_MODE_RESET_SENSOR | \ + FP_MODE_DONT_CHANGE) + +/* Capture types defined in bits [30..28] */ +#define FP_MODE_CAPTURE_TYPE_SHIFT 28 +#define FP_MODE_CAPTURE_TYPE_MASK (0x7 << FP_MODE_CAPTURE_TYPE_SHIFT) +/* + * This enum must remain ordered, if you add new values you must ensure that + * FP_CAPTURE_TYPE_MAX is still the last one. + */ +enum fp_capture_type { + /* Full blown vendor-defined capture (produces 'frame_size' bytes) */ + FP_CAPTURE_VENDOR_FORMAT = 0, + /* Simple raw image capture (produces width x height x bpp bits) */ + FP_CAPTURE_SIMPLE_IMAGE = 1, + /* Self test pattern (e.g. checkerboard) */ + FP_CAPTURE_PATTERN0 = 2, + /* Self test pattern (e.g. 
inverted checkerboard) */ + FP_CAPTURE_PATTERN1 = 3, + /* Capture for Quality test with fixed contrast */ + FP_CAPTURE_QUALITY_TEST = 4, + /* Capture for pixel reset value test */ + FP_CAPTURE_RESET_TEST = 5, + FP_CAPTURE_TYPE_MAX, +}; +/* Extracts the capture type from the sensor 'mode' word */ +#define FP_CAPTURE_TYPE(mode) (((mode) & FP_MODE_CAPTURE_TYPE_MASK) \ + >> FP_MODE_CAPTURE_TYPE_SHIFT) + +struct ec_params_fp_mode { + uint32_t mode; /* as defined by FP_MODE_ constants */ +} __ec_align4; + +struct ec_response_fp_mode { + uint32_t mode; /* as defined by FP_MODE_ constants */ +} __ec_align4; + +/* Retrieve Fingerprint sensor information */ +#define EC_CMD_FP_INFO 0x0403 + +/* Number of dead pixels detected on the last maintenance */ +#define FP_ERROR_DEAD_PIXELS(errors) ((errors) & 0x3FF) +/* Unknown number of dead pixels detected on the last maintenance */ +#define FP_ERROR_DEAD_PIXELS_UNKNOWN (0x3FF) +/* No interrupt from the sensor */ +#define FP_ERROR_NO_IRQ BIT(12) +/* SPI communication error */ +#define FP_ERROR_SPI_COMM BIT(13) +/* Invalid sensor Hardware ID */ +#define FP_ERROR_BAD_HWID BIT(14) +/* Sensor initialization failed */ +#define FP_ERROR_INIT_FAIL BIT(15) + +struct ec_response_fp_info_v0 { + /* Sensor identification */ + uint32_t vendor_id; + uint32_t product_id; + uint32_t model_id; + uint32_t version; + /* Image frame characteristics */ + uint32_t frame_size; + uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ + uint16_t width; + uint16_t height; + uint16_t bpp; + uint16_t errors; /* see FP_ERROR_ flags above */ +} __ec_align4; + +struct ec_response_fp_info { + /* Sensor identification */ + uint32_t vendor_id; + uint32_t product_id; + uint32_t model_id; + uint32_t version; + /* Image frame characteristics */ + uint32_t frame_size; + uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ + uint16_t width; + uint16_t height; + uint16_t bpp; + uint16_t errors; /* see FP_ERROR_ flags above */ + /* Template/finger current information */ + uint32_t template_size; /* max template size in bytes */ + uint16_t template_max; /* maximum number of fingers/templates */ + uint16_t template_valid; /* number of valid fingers/templates */ + uint32_t template_dirty; /* bitmap of templates with MCU side changes */ + uint32_t template_version; /* version of the template format */ +} __ec_align4; + +/* Get the last captured finger frame or a template content */ +#define EC_CMD_FP_FRAME 0x0404 + +/* constants defining the 'offset' field which also contains the frame index */ +#define FP_FRAME_INDEX_SHIFT 28 +/* Frame buffer where the captured image is stored */ +#define FP_FRAME_INDEX_RAW_IMAGE 0 +/* First frame buffer holding a template */ +#define FP_FRAME_INDEX_TEMPLATE 1 +#define FP_FRAME_GET_BUFFER_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT) +#define FP_FRAME_OFFSET_MASK 0x0FFFFFFF + +/* Version of the format of the encrypted templates. */ +#define FP_TEMPLATE_FORMAT_VERSION 3 + +/* Constants for encryption parameters */ +#define FP_CONTEXT_NONCE_BYTES 12 +#define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t)) +#define FP_CONTEXT_TAG_BYTES 16 +#define FP_CONTEXT_SALT_BYTES 16 +#define FP_CONTEXT_TPM_BYTES 32 + +struct ec_fp_template_encryption_metadata { + /* + * Version of the structure format (N=3). + */ + uint16_t struct_version; + /* Reserved bytes, set to 0. */ + uint16_t reserved; + /* + * The salt is *only* ever used for key derivation. The nonce is unique, + * a different one is used for every message. 
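 *
 * As a rough sketch of how a template is read back with EC_CMD_FP_FRAME
 * (struct ec_params_fp_frame and the FP_FRAME_* constants are defined
 * nearby), requesting the first chunk of template slot 0 packs the buffer
 * index into the top nibble of the offset:
 *
 *	struct ec_params_fp_frame req = {
 *		.offset = (FP_FRAME_INDEX_TEMPLATE + 0) << FP_FRAME_INDEX_SHIFT,
 *		.size = 2048,	/* arbitrary chunk size for this sketch */
 *	};
 *
 * Later chunks advance the low FP_FRAME_OFFSET_MASK bits of .offset.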
+ */ + uint8_t nonce[FP_CONTEXT_NONCE_BYTES]; + uint8_t salt[FP_CONTEXT_SALT_BYTES]; + uint8_t tag[FP_CONTEXT_TAG_BYTES]; +}; + +struct ec_params_fp_frame { + /* + * The offset contains the template index or FP_FRAME_INDEX_RAW_IMAGE + * in the high nibble, and the real offset within the frame in + * FP_FRAME_OFFSET_MASK. + */ + uint32_t offset; + uint32_t size; +} __ec_align4; + +/* Load a template into the MCU */ +#define EC_CMD_FP_TEMPLATE 0x0405 + +/* Flag in the 'size' field indicating that the full template has been sent */ +#define FP_TEMPLATE_COMMIT 0x80000000 + +struct ec_params_fp_template { + uint32_t offset; + uint32_t size; + uint8_t data[]; +} __ec_align4; + +/* Clear the current fingerprint user context and set a new one */ +#define EC_CMD_FP_CONTEXT 0x0406 + +struct ec_params_fp_context { + uint32_t userid[FP_CONTEXT_USERID_WORDS]; +} __ec_align4; + +#define EC_CMD_FP_STATS 0x0407 + +#define FPSTATS_CAPTURE_INV BIT(0) +#define FPSTATS_MATCHING_INV BIT(1) + +struct ec_response_fp_stats { + uint32_t capture_time_us; + uint32_t matching_time_us; + uint32_t overall_time_us; + struct { + uint32_t lo; + uint32_t hi; + } overall_t0; + uint8_t timestamps_invalid; + int8_t template_matched; +} __ec_align2; + +#define EC_CMD_FP_SEED 0x0408 +struct ec_params_fp_seed { + /* + * Version of the structure format (N=3). + */ + uint16_t struct_version; + /* Reserved bytes, set to 0. */ + uint16_t reserved; + /* Seed from the TPM. */ + uint8_t seed[FP_CONTEXT_TPM_BYTES]; +} __ec_align4; + +/*****************************************************************************/ +/* Touchpad MCU commands: range 0x0500-0x05FF */ + +/* Perform touchpad self test */ +#define EC_CMD_TP_SELF_TEST 0x0500 + +/* Get number of frame types, and the size of each type */ +#define EC_CMD_TP_FRAME_INFO 0x0501 + +struct ec_response_tp_frame_info { + uint32_t n_frames; + uint32_t frame_sizes[0]; +} __ec_align4; + +/* Create a snapshot of current frame readings */ +#define EC_CMD_TP_FRAME_SNAPSHOT 0x0502 + +/* Read the frame */ +#define EC_CMD_TP_FRAME_GET 0x0503 + +struct ec_params_tp_frame_get { + uint32_t frame_index; + uint32_t offset; + uint32_t size; +} __ec_align4; + +/*****************************************************************************/ +/* EC-EC communication commands: range 0x0600-0x06FF */ + +#define EC_COMM_TEXT_MAX 8 + +/* + * Get battery static information, i.e. information that never changes, or + * very infrequently. + */ +#define EC_CMD_BATTERY_GET_STATIC 0x0600 + +/** + * struct ec_params_battery_static_info - Battery static info parameters + * @index: Battery index. + */ +struct ec_params_battery_static_info { + uint8_t index; +} __ec_align_size1; + +/** + * struct ec_response_battery_static_info - Battery static info response + * @design_capacity: Battery Design Capacity (mAh) + * @design_voltage: Battery Design Voltage (mV) + * @manufacturer: Battery Manufacturer String + * @model: Battery Model Number String + * @serial: Battery Serial Number String + * @type: Battery Type String + * @cycle_count: Battery Cycle Count + */ +struct ec_response_battery_static_info { + uint16_t design_capacity; + uint16_t design_voltage; + char manufacturer[EC_COMM_TEXT_MAX]; + char model[EC_COMM_TEXT_MAX]; + char serial[EC_COMM_TEXT_MAX]; + char type[EC_COMM_TEXT_MAX]; + /* TODO(crbug.com/795991): Consider moving to dynamic structure. */ + uint32_t cycle_count; +} __ec_align4; + +/* + * Get battery dynamic information, i.e. information that is likely to change + * every time it is read. 
+ */ +#define EC_CMD_BATTERY_GET_DYNAMIC 0x0601 + +/** + * struct ec_params_battery_dynamic_info - Battery dynamic info parameters + * @index: Battery index. + */ +struct ec_params_battery_dynamic_info { + uint8_t index; +} __ec_align_size1; + +/** + * struct ec_response_battery_dynamic_info - Battery dynamic info response + * @actual_voltage: Battery voltage (mV) + * @actual_current: Battery current (mA); negative=discharging + * @remaining_capacity: Remaining capacity (mAh) + * @full_capacity: Capacity (mAh, might change occasionally) + * @flags: Flags, see EC_BATT_FLAG_* + * @desired_voltage: Charging voltage desired by battery (mV) + * @desired_current: Charging current desired by battery (mA) + */ +struct ec_response_battery_dynamic_info { + int16_t actual_voltage; + int16_t actual_current; + int16_t remaining_capacity; + int16_t full_capacity; + int16_t flags; + int16_t desired_voltage; + int16_t desired_current; +} __ec_align2; + +/* + * Control charger chip. Used to control charger chip on the slave. + */ +#define EC_CMD_CHARGER_CONTROL 0x0602 + +/** + * struct ec_params_charger_control - Charger control parameters + * @max_current: Charger current (mA). Positive to allow base to draw up to + * max_current and (possibly) charge battery, negative to request current + * from base (OTG). + * @otg_voltage: Voltage (mV) to use in OTG mode, ignored if max_current is + * >= 0. + * @allow_charging: Allow base battery charging (only makes sense if + * max_current > 0). + */ +struct ec_params_charger_control { + int16_t max_current; + uint16_t otg_voltage; + uint8_t allow_charging; +} __ec_align_size1; + +/*****************************************************************************/ +/* + * Reserve a range of host commands for board-specific, experimental, or + * special purpose features. These can be (re)used without updating this file. + * + * CAUTION: Don't go nuts with this. Shipping products should document ALL + * their EC commands for easier development, testing, debugging, and support. + * + * All commands MUST be #defined to be 4-digit UPPER CASE hex values + * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. + * + * In your experimental code, you may want to do something like this: + * + * #define EC_CMD_MAGIC_FOO 0x0000 + * #define EC_CMD_MAGIC_BAR 0x0001 + * #define EC_CMD_MAGIC_HEY 0x0002 + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_FOO, magic_foo_handler, + * EC_VER_MASK(0); + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_BAR, magic_bar_handler, + * EC_VER_MASK(0); + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_HEY, magic_hey_handler, + * EC_VER_MASK(0); + */ +#define EC_CMD_BOARD_SPECIFIC_BASE 0x3E00 +#define EC_CMD_BOARD_SPECIFIC_LAST 0x3FFF + +/* + * Given the private host command offset, calculate the true private host + * command value. 
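 *
 * As a quick worked example using the hypothetical offsets above: with
 * EC_CMD_BOARD_SPECIFIC_BASE at 0x3E00, the macro below maps the private
 * offset 0x0001 (EC_CMD_MAGIC_BAR in the sketch above) to host command
 * 0x3E01:
 *
 *	EC_PRIVATE_HOST_COMMAND_VALUE(EC_CMD_MAGIC_BAR) == 0x3E01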
+ */ +#define EC_PRIVATE_HOST_COMMAND_VALUE(command) \ + (EC_CMD_BOARD_SPECIFIC_BASE + (command)) /*****************************************************************************/ /* @@ -3538,4 +5708,6 @@ struct ec_response_usb_pd_mux_info { #define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 #define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE + + #endif /* __CROS_EC_COMMANDS_H */ diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h index fe04b708742b..2906bf6160fb 100644 --- a/include/linux/mfd/da9062/registers.h +++ b/include/linux/mfd/da9062/registers.h @@ -797,6 +797,9 @@ #define DA9062AA_BUCK3_SL_A_SHIFT 7 #define DA9062AA_BUCK3_SL_A_MASK BIT(7) +/* DA9062AA_VLDO[1-4]_A common */ +#define DA9062AA_VLDO_A_MIN_SEL 2 + /* DA9062AA_VLDO1_A = 0x0A9 */ #define DA9062AA_VLDO1_A_SHIFT 0 #define DA9062AA_VLDO1_A_MASK 0x3f diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h index 77c566ab96ab..085edbf7601b 100644 --- a/include/linux/mfd/da9063/pdata.h +++ b/include/linux/mfd/da9063/pdata.h @@ -11,55 +11,6 @@ #ifndef __MFD_DA9063_PDATA_H__ #define __MFD_DA9063_PDATA_H__ -#include <linux/regulator/machine.h> - -/* - * Regulator configuration - */ -/* DA9063 and DA9063L regulator IDs */ -enum { - /* BUCKs */ - DA9063_ID_BCORE1, - DA9063_ID_BCORE2, - DA9063_ID_BPRO, - DA9063_ID_BMEM, - DA9063_ID_BIO, - DA9063_ID_BPERI, - - /* BCORE1 and BCORE2 in merged mode */ - DA9063_ID_BCORES_MERGED, - /* BMEM and BIO in merged mode */ - DA9063_ID_BMEM_BIO_MERGED, - /* When two BUCKs are merged, they cannot be reused separately */ - - /* LDOs on both DA9063 and DA9063L */ - DA9063_ID_LDO3, - DA9063_ID_LDO7, - DA9063_ID_LDO8, - DA9063_ID_LDO9, - DA9063_ID_LDO11, - - /* DA9063-only LDOs */ - DA9063_ID_LDO1, - DA9063_ID_LDO2, - DA9063_ID_LDO4, - DA9063_ID_LDO5, - DA9063_ID_LDO6, - DA9063_ID_LDO10, -}; - -/* Regulators platform data */ -struct da9063_regulator_data { - int id; - struct regulator_init_data *initdata; -}; - -struct da9063_regulators_pdata { - unsigned n_regulators; - struct da9063_regulator_data *regulator_data; -}; - - /* * RGB LED configuration */ diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index e619def115b4..ce965354bbad 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -14,6 +14,7 @@ enum lp87565_device_type { LP87565_DEVICE_TYPE_UNKNOWN = 0, + LP87565_DEVICE_TYPE_LP87561_Q1, LP87565_DEVICE_TYPE_LP87565_Q1, }; @@ -246,6 +247,7 @@ enum LP87565_regulator_id { LP87565_BUCK_3, LP87565_BUCK_10, LP87565_BUCK_23, + LP87565_BUCK_3210, }; /** diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index 4d5d51a9c8a6..7ffa696cce7c 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD internals for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. 
*/ #ifndef MADERA_CORE_H @@ -26,15 +22,21 @@ enum madera_type { CS47L85 = 2, CS47L90 = 3, CS47L91 = 4, + CS47L92 = 5, + CS47L93 = 6, WM1840 = 7, + CS47L15 = 8, + CS42L92 = 9, }; #define MADERA_MAX_CORE_SUPPLIES 2 #define MADERA_MAX_GPIOS 40 +#define CS47L15_NUM_GPIOS 15 #define CS47L35_NUM_GPIOS 16 #define CS47L85_NUM_GPIOS 40 #define CS47L90_NUM_GPIOS 38 +#define CS47L92_NUM_GPIOS 16 #define MADERA_MAX_MICBIAS 4 diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 8dc852402dbb..fa9595dd42ba 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. */ #ifndef MADERA_PDATA_H @@ -16,6 +12,7 @@ #include <linux/regulator/arizona-ldo1.h> #include <linux/regulator/arizona-micsupp.h> #include <linux/regulator/machine.h> +#include <sound/madera-pdata.h> #define MADERA_MAX_MICBIAS 4 #define MADERA_MAX_CHILD_MICBIAS 4 @@ -34,11 +31,13 @@ struct madera_codec_pdata; * @micvdd: Substruct of pdata for the MICVDD regulator * @irq_flags: Mode for primary IRQ (defaults to active low) * @gpio_base: Base GPIO number - * @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt) + * @gpio_configs: Array of GPIO configurations (See + * Documentation/driver-api/pinctl.rst) * @n_gpio_configs: Number of entries in gpio_configs * @gpsw: General purpose switch mode setting. Depends on the external * hardware connected to the switch. (See the SW1_MODE field * in the datasheet for the available values for your codec) + * @codec: Substruct of pdata for the ASoC codec driver */ struct madera_pdata { struct gpio_desc *reset; @@ -53,6 +52,8 @@ struct madera_pdata { int n_gpio_configs; u32 gpsw[MADERA_MAX_GPSW]; + + struct madera_codec_pdata codec; }; #endif diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h index 977e06101711..fe909d177762 100644 --- a/include/linux/mfd/madera/registers.h +++ b/include/linux/mfd/madera/registers.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Madera register definitions * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. 
*/ #ifndef MADERA_REGISTERS_H @@ -76,10 +72,14 @@ #define MADERA_FLL1_CONTROL_4 0x174 #define MADERA_FLL1_CONTROL_5 0x175 #define MADERA_FLL1_CONTROL_6 0x176 -#define MADERA_FLL1_LOOP_FILTER_TEST_1 0x177 -#define MADERA_FLL1_NCO_TEST_0 0x178 +#define CS47L92_FLL1_CONTROL_7 0x177 +#define CS47L92_FLL1_CONTROL_8 0x178 #define MADERA_FLL1_CONTROL_7 0x179 +#define CS47L92_FLL1_CONTROL_9 0x179 #define MADERA_FLL1_EFS_2 0x17A +#define CS47L92_FLL1_CONTROL_10 0x17A +#define MADERA_FLL1_CONTROL_11 0x17B +#define MADERA_FLL1_DIGITAL_TEST_1 0x17D #define CS47L35_FLL1_SYNCHRONISER_1 0x17F #define CS47L35_FLL1_SYNCHRONISER_2 0x180 #define CS47L35_FLL1_SYNCHRONISER_3 0x181 @@ -98,16 +98,21 @@ #define MADERA_FLL1_SYNCHRONISER_7 0x187 #define MADERA_FLL1_SPREAD_SPECTRUM 0x189 #define MADERA_FLL1_GPIO_CLOCK 0x18A +#define CS47L92_FLL1_GPIO_CLOCK 0x18E #define MADERA_FLL2_CONTROL_1 0x191 #define MADERA_FLL2_CONTROL_2 0x192 #define MADERA_FLL2_CONTROL_3 0x193 #define MADERA_FLL2_CONTROL_4 0x194 #define MADERA_FLL2_CONTROL_5 0x195 #define MADERA_FLL2_CONTROL_6 0x196 -#define MADERA_FLL2_LOOP_FILTER_TEST_1 0x197 -#define MADERA_FLL2_NCO_TEST_0 0x198 +#define CS47L92_FLL2_CONTROL_7 0x197 +#define CS47L92_FLL2_CONTROL_8 0x198 #define MADERA_FLL2_CONTROL_7 0x199 +#define CS47L92_FLL2_CONTROL_9 0x199 #define MADERA_FLL2_EFS_2 0x19A +#define CS47L92_FLL2_CONTROL_10 0x19A +#define MADERA_FLL2_CONTROL_11 0x19B +#define MADERA_FLL2_DIGITAL_TEST_1 0x19D #define MADERA_FLL2_SYNCHRONISER_1 0x1A1 #define MADERA_FLL2_SYNCHRONISER_2 0x1A2 #define MADERA_FLL2_SYNCHRONISER_3 0x1A3 @@ -117,14 +122,13 @@ #define MADERA_FLL2_SYNCHRONISER_7 0x1A7 #define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9 #define MADERA_FLL2_GPIO_CLOCK 0x1AA +#define CS47L92_FLL2_GPIO_CLOCK 0x1AE #define MADERA_FLL3_CONTROL_1 0x1B1 #define MADERA_FLL3_CONTROL_2 0x1B2 #define MADERA_FLL3_CONTROL_3 0x1B3 #define MADERA_FLL3_CONTROL_4 0x1B4 #define MADERA_FLL3_CONTROL_5 0x1B5 #define MADERA_FLL3_CONTROL_6 0x1B6 -#define MADERA_FLL3_LOOP_FILTER_TEST_1 0x1B7 -#define MADERA_FLL3_NCO_TEST_0 0x1B8 #define MADERA_FLL3_CONTROL_7 0x1B9 #define MADERA_FLL3_SYNCHRONISER_1 0x1C1 #define MADERA_FLL3_SYNCHRONISER_2 0x1C2 @@ -244,6 +248,8 @@ #define MADERA_IN6R_CONTROL 0x33C #define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D #define MADERA_DMIC6R_CONTROL 0x33E +#define CS47L15_ADC_INT_BIAS 0x3A8 +#define CS47L15_PGA_BIAS_SEL 0x3C4 #define MADERA_OUTPUT_ENABLES_1 0x400 #define MADERA_OUTPUT_STATUS_1 0x401 #define MADERA_RAW_OUTPUT_STATUS_1 0x406 @@ -265,6 +271,7 @@ #define MADERA_NOISE_GATE_SELECT_2R 0x41F #define MADERA_OUTPUT_PATH_CONFIG_3L 0x420 #define MADERA_DAC_DIGITAL_VOLUME_3L 0x421 +#define MADERA_OUTPUT_PATH_CONFIG_3 0x422 #define MADERA_NOISE_GATE_SELECT_3L 0x423 #define MADERA_OUTPUT_PATH_CONFIG_3R 0x424 #define MADERA_DAC_DIGITAL_VOLUME_3R 0x425 @@ -287,9 +294,6 @@ #define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C #define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D #define MADERA_NOISE_GATE_SELECT_6R 0x43F -#define MADERA_DRE_ENABLE 0x440 -#define MADERA_EDRE_ENABLE 0x448 -#define MADERA_EDRE_MANUAL 0x44A #define MADERA_DAC_AEC_CONTROL_1 0x450 #define MADERA_DAC_AEC_CONTROL_2 0x451 #define MADERA_NOISE_GATE_CONTROL 0x458 @@ -367,8 +371,20 @@ #define MADERA_AIF3_FRAME_CTRL_2 0x588 #define MADERA_AIF3_FRAME_CTRL_3 0x589 #define MADERA_AIF3_FRAME_CTRL_4 0x58A +#define MADERA_AIF3_FRAME_CTRL_5 0x58B +#define MADERA_AIF3_FRAME_CTRL_6 0x58C +#define MADERA_AIF3_FRAME_CTRL_7 0x58D +#define MADERA_AIF3_FRAME_CTRL_8 0x58E +#define MADERA_AIF3_FRAME_CTRL_9 0x58F +#define MADERA_AIF3_FRAME_CTRL_10 
0x590 #define MADERA_AIF3_FRAME_CTRL_11 0x591 #define MADERA_AIF3_FRAME_CTRL_12 0x592 +#define MADERA_AIF3_FRAME_CTRL_13 0x593 +#define MADERA_AIF3_FRAME_CTRL_14 0x594 +#define MADERA_AIF3_FRAME_CTRL_15 0x595 +#define MADERA_AIF3_FRAME_CTRL_16 0x596 +#define MADERA_AIF3_FRAME_CTRL_17 0x597 +#define MADERA_AIF3_FRAME_CTRL_18 0x598 #define MADERA_AIF3_TX_ENABLES 0x599 #define MADERA_AIF3_RX_ENABLES 0x59A #define MADERA_AIF3_FORCE_WRITE 0x59B @@ -660,6 +676,54 @@ #define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D #define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E #define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F +#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790 +#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791 +#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792 +#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793 +#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794 +#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795 +#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796 +#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797 +#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798 +#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799 +#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A +#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B +#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C +#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D +#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E +#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F +#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0 +#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1 +#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2 +#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3 +#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4 +#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5 +#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6 +#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7 +#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8 +#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9 +#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA +#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB +#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC +#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD +#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE +#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF +#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0 +#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1 +#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2 +#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3 +#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4 +#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5 +#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6 +#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7 +#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8 +#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 0x7B9 +#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA +#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB +#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC +#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD +#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE +#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF #define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0 #define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1 #define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2 @@ -1103,68 +1167,8 @@ #define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73 #define MADERA_FCR_COEFF_START 0xF74 #define MADERA_FCR_COEFF_END 0xFC5 -#define MADERA_DAC_COMP_1 0x1300 -#define MADERA_DAC_COMP_2 0x1302 -#define MADERA_FRF_COEFFICIENT_1L_1 0x1380 -#define MADERA_FRF_COEFFICIENT_1L_2 0x1381 -#define MADERA_FRF_COEFFICIENT_1L_3 0x1382 -#define MADERA_FRF_COEFFICIENT_1L_4 0x1383 -#define MADERA_FRF_COEFFICIENT_1R_1 0x1390 -#define MADERA_FRF_COEFFICIENT_1R_2 
0x1391 -#define MADERA_FRF_COEFFICIENT_1R_3 0x1392 -#define MADERA_FRF_COEFFICIENT_1R_4 0x1393 -#define MADERA_FRF_COEFFICIENT_2L_1 0x13A0 -#define MADERA_FRF_COEFFICIENT_2L_2 0x13A1 -#define MADERA_FRF_COEFFICIENT_2L_3 0x13A2 -#define MADERA_FRF_COEFFICIENT_2L_4 0x13A3 -#define MADERA_FRF_COEFFICIENT_2R_1 0x13B0 -#define MADERA_FRF_COEFFICIENT_2R_2 0x13B1 -#define MADERA_FRF_COEFFICIENT_2R_3 0x13B2 -#define MADERA_FRF_COEFFICIENT_2R_4 0x13B3 -#define MADERA_FRF_COEFFICIENT_3L_1 0x13C0 -#define MADERA_FRF_COEFFICIENT_3L_2 0x13C1 -#define MADERA_FRF_COEFFICIENT_3L_3 0x13C2 -#define MADERA_FRF_COEFFICIENT_3L_4 0x13C3 -#define MADERA_FRF_COEFFICIENT_3R_1 0x13D0 -#define MADERA_FRF_COEFFICIENT_3R_2 0x13D1 -#define MADERA_FRF_COEFFICIENT_3R_3 0x13D2 -#define MADERA_FRF_COEFFICIENT_3R_4 0x13D3 -#define MADERA_FRF_COEFFICIENT_4L_1 0x13E0 -#define MADERA_FRF_COEFFICIENT_4L_2 0x13E1 -#define MADERA_FRF_COEFFICIENT_4L_3 0x13E2 -#define MADERA_FRF_COEFFICIENT_4L_4 0x13E3 -#define MADERA_FRF_COEFFICIENT_4R_1 0x13F0 -#define MADERA_FRF_COEFFICIENT_4R_2 0x13F1 -#define MADERA_FRF_COEFFICIENT_4R_3 0x13F2 -#define MADERA_FRF_COEFFICIENT_4R_4 0x13F3 -#define CS47L35_FRF_COEFFICIENT_4L_1 0x13A0 -#define CS47L35_FRF_COEFFICIENT_4L_2 0x13A1 -#define CS47L35_FRF_COEFFICIENT_4L_3 0x13A2 -#define CS47L35_FRF_COEFFICIENT_4L_4 0x13A3 -#define CS47L35_FRF_COEFFICIENT_5L_1 0x13B0 -#define CS47L35_FRF_COEFFICIENT_5L_2 0x13B1 -#define CS47L35_FRF_COEFFICIENT_5L_3 0x13B2 -#define CS47L35_FRF_COEFFICIENT_5L_4 0x13B3 -#define CS47L35_FRF_COEFFICIENT_5R_1 0x13C0 -#define CS47L35_FRF_COEFFICIENT_5R_2 0x13C1 -#define CS47L35_FRF_COEFFICIENT_5R_3 0x13C2 -#define CS47L35_FRF_COEFFICIENT_5R_4 0x13C3 -#define MADERA_FRF_COEFFICIENT_5L_1 0x1400 -#define MADERA_FRF_COEFFICIENT_5L_2 0x1401 -#define MADERA_FRF_COEFFICIENT_5L_3 0x1402 -#define MADERA_FRF_COEFFICIENT_5L_4 0x1403 -#define MADERA_FRF_COEFFICIENT_5R_1 0x1410 -#define MADERA_FRF_COEFFICIENT_5R_2 0x1411 -#define MADERA_FRF_COEFFICIENT_5R_3 0x1412 -#define MADERA_FRF_COEFFICIENT_5R_4 0x1413 -#define MADERA_FRF_COEFFICIENT_6L_1 0x1420 -#define MADERA_FRF_COEFFICIENT_6L_2 0x1421 -#define MADERA_FRF_COEFFICIENT_6L_3 0x1422 -#define MADERA_FRF_COEFFICIENT_6L_4 0x1423 -#define MADERA_FRF_COEFFICIENT_6R_1 0x1430 -#define MADERA_FRF_COEFFICIENT_6R_2 0x1431 -#define MADERA_FRF_COEFFICIENT_6R_3 0x1432 -#define MADERA_FRF_COEFFICIENT_6R_4 0x1433 +#define MADERA_AUXPDM1_CTRL_0 0x10C0 +#define MADERA_AUXPDM1_CTRL_1 0x10C1 #define MADERA_DFC1_CTRL 0x1480 #define MADERA_DFC1_RX 0x1482 #define MADERA_DFC1_TX 0x1484 @@ -1202,6 +1206,8 @@ #define MADERA_GPIO1_CTRL_2 0x1701 #define MADERA_GPIO2_CTRL_1 0x1702 #define MADERA_GPIO2_CTRL_2 0x1703 +#define MADERA_GPIO15_CTRL_1 0x171C +#define MADERA_GPIO15_CTRL_2 0x171D #define MADERA_GPIO16_CTRL_1 0x171E #define MADERA_GPIO16_CTRL_2 0x171F #define MADERA_GPIO38_CTRL_1 0x174A @@ -1232,6 +1238,7 @@ #define MADERA_IRQ2_CTRL 0x1A82 #define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0 #define MADERA_WSEQ_SEQUENCE_1 0x3000 +#define MADERA_WSEQ_SEQUENCE_225 0x31C0 #define MADERA_WSEQ_SEQUENCE_252 0x31F6 #define CS47L35_OTP_HPDET_CAL_1 0x31F8 #define CS47L35_OTP_HPDET_CAL_2 0x31FA @@ -1441,6 +1448,12 @@ #define MADERA_OPCLK_ASYNC_SEL_WIDTH 3 /* (0x0171) FLL1_Control_1 */ +#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000 +#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12 +#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4 +#define MADERA_FLL1_HOLD_MASK 0x0004 +#define MADERA_FLL1_HOLD_SHIFT 2 +#define MADERA_FLL1_HOLD_WIDTH 1 #define MADERA_FLL1_FREERUN 0x0002 #define 
MADERA_FLL1_FREERUN_MASK 0x0002 #define MADERA_FLL1_FREERUN_SHIFT 1 @@ -1473,6 +1486,9 @@ #define MADERA_FLL1_FRATIO_MASK 0x0F00 #define MADERA_FLL1_FRATIO_SHIFT 8 #define MADERA_FLL1_FRATIO_WIDTH 4 +#define MADERA_FLL1_FB_DIV_MASK 0x03FF +#define MADERA_FLL1_FB_DIV_SHIFT 0 +#define MADERA_FLL1_FB_DIV_WIDTH 10 /* (0x0176) FLL1_Control_6 */ #define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0 @@ -1482,15 +1498,6 @@ #define MADERA_FLL1_REFCLK_SRC_SHIFT 0 #define MADERA_FLL1_REFCLK_SRC_WIDTH 4 -/* (0x0177) FLL1_Loop_Filter_Test_1 */ -#define MADERA_FLL1_FRC_INTEG_UPD 0x8000 -#define MADERA_FLL1_FRC_INTEG_UPD_MASK 0x8000 -#define MADERA_FLL1_FRC_INTEG_UPD_SHIFT 15 -#define MADERA_FLL1_FRC_INTEG_UPD_WIDTH 1 -#define MADERA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF -#define MADERA_FLL1_FRC_INTEG_VAL_SHIFT 0 -#define MADERA_FLL1_FRC_INTEG_VAL_WIDTH 12 - /* (0x0179) FLL1_Control_7 */ #define MADERA_FLL1_GAIN_MASK 0x003c #define MADERA_FLL1_GAIN_SHIFT 2 @@ -1504,6 +1511,30 @@ #define MADERA_FLL1_PHASE_ENA_SHIFT 11 #define MADERA_FLL1_PHASE_ENA_WIDTH 1 +/* (0x017A) FLL1_Control_10 */ +#define MADERA_FLL1_HP_MASK 0xC000 +#define MADERA_FLL1_HP_SHIFT 14 +#define MADERA_FLL1_HP_WIDTH 2 +#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000 +#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12 +#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1 + +/* (0x017B) FLL1_Control_11 */ +#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E +#define MADERA_FLL1_LOCKDET_THR_SHIFT 1 +#define MADERA_FLL1_LOCKDET_THR_WIDTH 4 +#define MADERA_FLL1_LOCKDET_MASK 0x0001 +#define MADERA_FLL1_LOCKDET_SHIFT 0 +#define MADERA_FLL1_LOCKDET_WIDTH 1 + +/* (0x017D) FLL1_Digital_Test_1 */ +#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100 +#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8 +#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2 + /* (0x0181) FLL1_Synchroniser_1 */ #define MADERA_FLL1_SYNC_ENA 0x0001 #define MADERA_FLL1_SYNC_ENA_MASK 0x0001 @@ -1625,6 +1656,13 @@ #define MADERA_LDO2_ENA_WIDTH 1 /* (0x0218) Mic_Bias_Ctrl_1 */ +#define MADERA_MICB1_EXT_CAP 0x8000 +#define MADERA_MICB1_EXT_CAP_MASK 0x8000 +#define MADERA_MICB1_EXT_CAP_SHIFT 15 +#define MADERA_MICB1_EXT_CAP_WIDTH 1 +#define MADERA_MICB1_LVL_MASK 0x01E0 +#define MADERA_MICB1_LVL_SHIFT 5 +#define MADERA_MICB1_LVL_WIDTH 4 #define MADERA_MICB1_ENA 0x0001 #define MADERA_MICB1_ENA_MASK 0x0001 #define MADERA_MICB1_ENA_SHIFT 0 @@ -2308,6 +2346,17 @@ #define MADERA_OUT1R_ENA_SHIFT 0 #define MADERA_OUT1R_ENA_WIDTH 1 +/* (0x0408) Output_Rate_1 */ +#define MADERA_CP_DAC_MODE_MASK 0x0040 +#define MADERA_CP_DAC_MODE_SHIFT 6 +#define MADERA_CP_DAC_MODE_WIDTH 1 +#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030 +#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4 +#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2 +#define MADERA_OUT_CLK_SRC_MASK 0x0007 +#define MADERA_OUT_CLK_SRC_SHIFT 0 +#define MADERA_OUT_CLK_SRC_WIDTH 3 + /* (0x0409) Output_Volume_Ramp */ #define MADERA_OUT_VD_RAMP_MASK 0x0070 #define MADERA_OUT_VD_RAMP_SHIFT 4 @@ -2829,6 +2878,30 @@ #define MADERA_AIF2RX1_ENA_WIDTH 1 /* (0x0599) AIF3_Tx_Enables */ +#define MADERA_AIF3TX8_ENA 0x0080 +#define MADERA_AIF3TX8_ENA_MASK 0x0080 +#define MADERA_AIF3TX8_ENA_SHIFT 7 +#define MADERA_AIF3TX8_ENA_WIDTH 1 +#define MADERA_AIF3TX7_ENA 0x0040 +#define MADERA_AIF3TX7_ENA_MASK 0x0040 +#define MADERA_AIF3TX7_ENA_SHIFT 6 +#define MADERA_AIF3TX7_ENA_WIDTH 1 +#define MADERA_AIF3TX6_ENA 0x0020 +#define MADERA_AIF3TX6_ENA_MASK 0x0020 +#define MADERA_AIF3TX6_ENA_SHIFT 5 +#define 
MADERA_AIF3TX6_ENA_WIDTH 1 +#define MADERA_AIF3TX5_ENA 0x0010 +#define MADERA_AIF3TX5_ENA_MASK 0x0010 +#define MADERA_AIF3TX5_ENA_SHIFT 4 +#define MADERA_AIF3TX5_ENA_WIDTH 1 +#define MADERA_AIF3TX4_ENA 0x0008 +#define MADERA_AIF3TX4_ENA_MASK 0x0008 +#define MADERA_AIF3TX4_ENA_SHIFT 3 +#define MADERA_AIF3TX4_ENA_WIDTH 1 +#define MADERA_AIF3TX3_ENA 0x0004 +#define MADERA_AIF3TX3_ENA_MASK 0x0004 +#define MADERA_AIF3TX3_ENA_SHIFT 2 +#define MADERA_AIF3TX3_ENA_WIDTH 1 #define MADERA_AIF3TX2_ENA 0x0002 #define MADERA_AIF3TX2_ENA_MASK 0x0002 #define MADERA_AIF3TX2_ENA_SHIFT 1 @@ -2839,6 +2912,30 @@ #define MADERA_AIF3TX1_ENA_WIDTH 1 /* (0x059A) AIF3_Rx_Enables */ +#define MADERA_AIF3RX8_ENA 0x0080 +#define MADERA_AIF3RX8_ENA_MASK 0x0080 +#define MADERA_AIF3RX8_ENA_SHIFT 7 +#define MADERA_AIF3RX8_ENA_WIDTH 1 +#define MADERA_AIF3RX7_ENA 0x0040 +#define MADERA_AIF3RX7_ENA_MASK 0x0040 +#define MADERA_AIF3RX7_ENA_SHIFT 6 +#define MADERA_AIF3RX7_ENA_WIDTH 1 +#define MADERA_AIF3RX6_ENA 0x0020 +#define MADERA_AIF3RX6_ENA_MASK 0x0020 +#define MADERA_AIF3RX6_ENA_SHIFT 5 +#define MADERA_AIF3RX6_ENA_WIDTH 1 +#define MADERA_AIF3RX5_ENA 0x0010 +#define MADERA_AIF3RX5_ENA_MASK 0x0010 +#define MADERA_AIF3RX5_ENA_SHIFT 4 +#define MADERA_AIF3RX5_ENA_WIDTH 1 +#define MADERA_AIF3RX4_ENA 0x0008 +#define MADERA_AIF3RX4_ENA_MASK 0x0008 +#define MADERA_AIF3RX4_ENA_SHIFT 3 +#define MADERA_AIF3RX4_ENA_WIDTH 1 +#define MADERA_AIF3RX3_ENA 0x0004 +#define MADERA_AIF3RX3_ENA_MASK 0x0004 +#define MADERA_AIF3RX3_ENA_SHIFT 2 +#define MADERA_AIF3RX3_ENA_WIDTH 1 #define MADERA_AIF3RX2_ENA 0x0002 #define MADERA_AIF3RX2_ENA_MASK 0x0002 #define MADERA_AIF3RX2_ENA_SHIFT 1 @@ -3453,6 +3550,25 @@ #define MADERA_FCR_MIC_MODE_SEL_SHIFT 2 #define MADERA_FCR_MIC_MODE_SEL_WIDTH 2 +/* (0x10C0) AUXPDM1_CTRL_0 */ +#define MADERA_AUXPDM1_SRC_MASK 0x0F00 +#define MADERA_AUXPDM1_SRC_SHIFT 8 +#define MADERA_AUXPDM1_SRC_WIDTH 4 +#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010 +#define MADERA_AUXPDM1_TXEDGE_SHIFT 4 +#define MADERA_AUXPDM1_TXEDGE_WIDTH 1 +#define MADERA_AUXPDM1_MSTR_MASK 0x0008 +#define MADERA_AUXPDM1_MSTR_SHIFT 3 +#define MADERA_AUXPDM1_MSTR_WIDTH 1 +#define MADERA_AUXPDM1_ENABLE_MASK 0x0001 +#define MADERA_AUXPDM1_ENABLE_SHIFT 0 +#define MADERA_AUXPDM1_ENABLE_WIDTH 1 + +/* (0x10C1) AUXPDM1_CTRL_1 */ +#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000 +#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14 +#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2 + /* (0x1480) DFC1_CTRL_W0 */ #define MADERA_DFC1_RATE_MASK 0x007C #define MADERA_DFC1_RATE_SHIFT 2 diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 1d831c7222b9..7cfd2b0504df 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -374,6 +374,7 @@ enum rk805_reg { #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) #define DEV_OFF BIT(0) +#define RTC_STOP BIT(0) #define VB_LO_ACT BIT(4) #define VB_LO_SEL_3500MV (7 << 0) @@ -387,7 +388,179 @@ enum rk805_reg { #define SHUTDOWN_FUN (0x2 << 2) #define SLEEP_FUN (0x1 << 2) #define RK8XX_ID_MSK 0xfff0 +#define PWM_MODE_MSK BIT(7) #define FPWM_MODE BIT(7) +#define AUTO_PWM_MODE 0 + +enum rk817_reg_id { + RK817_ID_DCDC1 = 0, + RK817_ID_DCDC2, + RK817_ID_DCDC3, + RK817_ID_DCDC4, + RK817_ID_LDO1, + RK817_ID_LDO2, + RK817_ID_LDO3, + RK817_ID_LDO4, + RK817_ID_LDO5, + RK817_ID_LDO6, + RK817_ID_LDO7, + RK817_ID_LDO8, + RK817_ID_LDO9, + RK817_ID_BOOST, + RK817_ID_BOOST_OTG_SW, + RK817_NUM_REGULATORS +}; + +enum rk809_reg_id { + RK809_ID_DCDC5 = RK817_ID_BOOST, + RK809_ID_SW1, + RK809_ID_SW2, + RK809_NUM_REGULATORS +}; + +#define 
RK817_SECONDS_REG 0x00 +#define RK817_MINUTES_REG 0x01 +#define RK817_HOURS_REG 0x02 +#define RK817_DAYS_REG 0x03 +#define RK817_MONTHS_REG 0x04 +#define RK817_YEARS_REG 0x05 +#define RK817_WEEKS_REG 0x06 +#define RK817_ALARM_SECONDS_REG 0x07 +#define RK817_ALARM_MINUTES_REG 0x08 +#define RK817_ALARM_HOURS_REG 0x09 +#define RK817_ALARM_DAYS_REG 0x0a +#define RK817_ALARM_MONTHS_REG 0x0b +#define RK817_ALARM_YEARS_REG 0x0c +#define RK817_RTC_CTRL_REG 0xd +#define RK817_RTC_STATUS_REG 0xe +#define RK817_RTC_INT_REG 0xf +#define RK817_RTC_COMP_LSB_REG 0x10 +#define RK817_RTC_COMP_MSB_REG 0x11 + +#define RK817_POWER_EN_REG(i) (0xb1 + (i)) +#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i)) + +#define RK817_POWER_CONFIG (0xb9) + +#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3) + +#define RK817_BUCK1_ON_VSEL_REG 0xBB +#define RK817_BUCK1_SLP_VSEL_REG 0xBC + +#define RK817_BUCK2_CONFIG_REG 0xBD +#define RK817_BUCK2_ON_VSEL_REG 0xBE +#define RK817_BUCK2_SLP_VSEL_REG 0xBF + +#define RK817_BUCK3_CONFIG_REG 0xC0 +#define RK817_BUCK3_ON_VSEL_REG 0xC1 +#define RK817_BUCK3_SLP_VSEL_REG 0xC2 + +#define RK817_BUCK4_CONFIG_REG 0xC3 +#define RK817_BUCK4_ON_VSEL_REG 0xC4 +#define RK817_BUCK4_SLP_VSEL_REG 0xC5 + +#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2) +#define RK817_BOOST_OTG_CFG (0xde) + +#define RK817_ID_MSB 0xed +#define RK817_ID_LSB 0xee + +#define RK817_SYS_STS 0xf0 +#define RK817_SYS_CFG(i) (0xf1 + (i)) + +#define RK817_ON_SOURCE_REG 0xf5 +#define RK817_OFF_SOURCE_REG 0xf6 + +/* INTERRUPT REGISTER */ +#define RK817_INT_STS_REG0 0xf8 +#define RK817_INT_STS_MSK_REG0 0xf9 +#define RK817_INT_STS_REG1 0xfa +#define RK817_INT_STS_MSK_REG1 0xfb +#define RK817_INT_STS_REG2 0xfc +#define RK817_INT_STS_MSK_REG2 0xfd +#define RK817_GPIO_INT_CFG 0xfe + +/* IRQ Definitions */ +#define RK817_IRQ_PWRON_FALL 0 +#define RK817_IRQ_PWRON_RISE 1 +#define RK817_IRQ_PWRON 2 +#define RK817_IRQ_PWMON_LP 3 +#define RK817_IRQ_HOTDIE 4 +#define RK817_IRQ_RTC_ALARM 5 +#define RK817_IRQ_RTC_PERIOD 6 +#define RK817_IRQ_VB_LO 7 +#define RK817_IRQ_PLUG_IN 8 +#define RK817_IRQ_PLUG_OUT 9 +#define RK817_IRQ_CHRG_TERM 10 +#define RK817_IRQ_CHRG_TIME 11 +#define RK817_IRQ_CHRG_TS 12 +#define RK817_IRQ_USB_OV 13 +#define RK817_IRQ_CHRG_IN_CLMP 14 +#define RK817_IRQ_BAT_DIS_ILIM 15 +#define RK817_IRQ_GATE_GPIO 16 +#define RK817_IRQ_TS_GPIO 17 +#define RK817_IRQ_CODEC_PD 18 +#define RK817_IRQ_CODEC_PO 19 +#define RK817_IRQ_CLASSD_MUTE_DONE 20 +#define RK817_IRQ_CLASSD_OCP 21 +#define RK817_IRQ_BAT_OVP 22 +#define RK817_IRQ_CHRG_BAT_HI 23 +#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1) + +/* + * rtc_ctrl 0xd + * same as 808, except bit4 + */ +#define RK817_RTC_CTRL_RSV4 BIT(4) + +/* power config 0xb9 */ +#define RK817_BUCK3_FB_RES_MSK BIT(6) +#define RK817_BUCK3_FB_RES_INTER BIT(6) +#define RK817_BUCK3_FB_RES_EXT 0 + +/* buck config 0xba */ +#define RK817_RAMP_RATE_OFFSET 6 +#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET) + +/* sys_cfg1 0xf2 */ +#define RK817_HOTDIE_TEMP_MSK (0x3 << 4) +#define RK817_HOTDIE_85 (0x0 << 4) +#define RK817_HOTDIE_95 (0x1 << 4) +#define RK817_HOTDIE_105 (0x2 << 4) +#define RK817_HOTDIE_115 (0x3 << 4) + +#define RK817_TSD_TEMP_MSK BIT(6) +#define RK817_TSD_140 0 +#define RK817_TSD_160 BIT(6) + +#define 
RK817_CLK32KOUT2_EN BIT(7) + +/* sys_cfg3 0xf4 */ +#define RK817_SLPPIN_FUNC_MSK (0x3 << 3) +#define SLPPIN_NULL_FUN (0x0 << 3) +#define SLPPIN_SLP_FUN (0x1 << 3) +#define SLPPIN_DN_FUN (0x2 << 3) +#define SLPPIN_RST_FUN (0x3 << 3) + +#define RK817_RST_FUNC_MSK (0x3 << 6) +#define RK817_RST_FUNC_SFT (6) +#define RK817_RST_FUNC_CNT (3) +#define RK817_RST_FUNC_DEV (0) /* reset the dev */ +#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */ + +#define RK817_SLPPOL_MSK BIT(5) +#define RK817_SLPPOL_H BIT(5) +#define RK817_SLPPOL_L (0) + +/* gpio&int 0xfe */ +#define RK817_INT_POL_MSK BIT(1) +#define RK817_INT_POL_H BIT(1) +#define RK817_INT_POL_L 0 +#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1) enum { BUCK_ILMIN_50MA, @@ -435,6 +608,8 @@ enum { enum { RK805_ID = 0x8050, RK808_ID = 0x0000, + RK809_ID = 0x8090, + RK817_ID = 0x8170, RK818_ID = 0x8181, }; @@ -445,5 +620,7 @@ struct rk808 { long variant; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; + void (*pm_pwroff_fn)(void); + void (*pm_pwroff_prep_fn)(void); }; #endif /* __LINUX_REGULATOR_RK808_H */ diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h new file mode 100644 index 000000000000..1013e60c5b25 --- /dev/null +++ b/include/linux/mfd/rohm-bd70528.h @@ -0,0 +1,408 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2018 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_BD70528_H__ +#define __LINUX_MFD_BD70528_H__ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/mfd/rohm-generic.h> +#include <linux/regmap.h> + +enum { + BD70528_BUCK1, + BD70528_BUCK2, + BD70528_BUCK3, + BD70528_LDO1, + BD70528_LDO2, + BD70528_LDO3, + BD70528_LED1, + BD70528_LED2, +}; + +struct bd70528_data { + struct rohm_regmap_dev chip; + struct mutex rtc_timer_lock; +}; + +#define BD70528_BUCK_VOLTS 17 +#define BD70528_BUCK_VOLTS 17 +#define BD70528_BUCK_VOLTS 17 +#define BD70528_LDO_VOLTS 0x20 + +#define BD70528_REG_BUCK1_EN 0x0F +#define BD70528_REG_BUCK1_VOLT 0x15 +#define BD70528_REG_BUCK2_EN 0x10 +#define BD70528_REG_BUCK2_VOLT 0x16 +#define BD70528_REG_BUCK3_EN 0x11 +#define BD70528_REG_BUCK3_VOLT 0x17 +#define BD70528_REG_LDO1_EN 0x1b +#define BD70528_REG_LDO1_VOLT 0x1e +#define BD70528_REG_LDO2_EN 0x1c +#define BD70528_REG_LDO2_VOLT 0x1f +#define BD70528_REG_LDO3_EN 0x1d +#define BD70528_REG_LDO3_VOLT 0x20 +#define BD70528_REG_LED_CTRL 0x2b +#define BD70528_REG_LED_VOLT 0x29 +#define BD70528_REG_LED_EN 0x2a + +/* main irq registers */ +#define BD70528_REG_INT_MAIN 0x7E +#define BD70528_REG_INT_MAIN_MASK 0x74 + +/* 'sub irq' registers */ +#define BD70528_REG_INT_SHDN 0x7F +#define BD70528_REG_INT_PWR_FLT 0x80 +#define BD70528_REG_INT_VR_FLT 0x81 +#define BD70528_REG_INT_MISC 0x82 +#define BD70528_REG_INT_BAT1 0x83 +#define BD70528_REG_INT_BAT2 0x84 +#define BD70528_REG_INT_RTC 0x85 +#define BD70528_REG_INT_GPIO 0x86 +#define BD70528_REG_INT_OP_FAIL 0x87 + +#define BD70528_REG_INT_SHDN_MASK 0x75 +#define BD70528_REG_INT_PWR_FLT_MASK 0x76 +#define BD70528_REG_INT_VR_FLT_MASK 0x77 +#define BD70528_REG_INT_MISC_MASK 0x78 +#define BD70528_REG_INT_BAT1_MASK 0x79 +#define BD70528_REG_INT_BAT2_MASK 0x7a +#define BD70528_REG_INT_RTC_MASK 0x7b +#define BD70528_REG_INT_GPIO_MASK 0x7c +#define BD70528_REG_INT_OP_FAIL_MASK 0x7d + +/* Reset related 'magic' registers */ +#define BD70528_REG_SHIPMODE 0x03 +#define BD70528_REG_HWRESET 0x04 +#define BD70528_REG_WARMRESET 0x05 +#define BD70528_REG_STANDBY 0x06 + +/* GPIO registers */ +#define 
BD70528_REG_GPIO_STATE 0x8F + +#define BD70528_REG_GPIO1_IN 0x4d +#define BD70528_REG_GPIO2_IN 0x4f +#define BD70528_REG_GPIO3_IN 0x51 +#define BD70528_REG_GPIO4_IN 0x53 +#define BD70528_REG_GPIO1_OUT 0x4e +#define BD70528_REG_GPIO2_OUT 0x50 +#define BD70528_REG_GPIO3_OUT 0x52 +#define BD70528_REG_GPIO4_OUT 0x54 + +/* clk control */ + +#define BD70528_REG_CLK_OUT 0x2c + +/* RTC */ + +#define BD70528_REG_RTC_COUNT_H 0x2d +#define BD70528_REG_RTC_COUNT_L 0x2e +#define BD70528_REG_RTC_SEC 0x2f +#define BD70528_REG_RTC_MINUTE 0x30 +#define BD70528_REG_RTC_HOUR 0x31 +#define BD70528_REG_RTC_WEEK 0x32 +#define BD70528_REG_RTC_DAY 0x33 +#define BD70528_REG_RTC_MONTH 0x34 +#define BD70528_REG_RTC_YEAR 0x35 + +#define BD70528_REG_RTC_ALM_SEC 0x36 +#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC +#define BD70528_REG_RTC_ALM_MINUTE 0x37 +#define BD70528_REG_RTC_ALM_HOUR 0x38 +#define BD70528_REG_RTC_ALM_WEEK 0x39 +#define BD70528_REG_RTC_ALM_DAY 0x3a +#define BD70528_REG_RTC_ALM_MONTH 0x3b +#define BD70528_REG_RTC_ALM_YEAR 0x3c +#define BD70528_REG_RTC_ALM_MASK 0x3d +#define BD70528_REG_RTC_ALM_REPEAT 0x3e +#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC + +#define BD70528_REG_RTC_WAKE_SEC 0x43 +#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC +#define BD70528_REG_RTC_WAKE_MIN 0x44 +#define BD70528_REG_RTC_WAKE_HOUR 0x45 +#define BD70528_REG_RTC_WAKE_CTRL 0x46 + +#define BD70528_REG_ELAPSED_TIMER_EN 0x42 +#define BD70528_REG_WAKE_EN 0x46 + +/* WDT registers */ +#define BD70528_REG_WDT_CTRL 0x4A +#define BD70528_REG_WDT_HOUR 0x49 +#define BD70528_REG_WDT_MINUTE 0x48 +#define BD70528_REG_WDT_SEC 0x47 + +/* Charger / Battery */ +#define BD70528_REG_CHG_CURR_STAT 0x59 +#define BD70528_REG_CHG_BAT_STAT 0x57 +#define BD70528_REG_CHG_BAT_TEMP 0x58 +#define BD70528_REG_CHG_IN_STAT 0x56 +#define BD70528_REG_CHG_DCIN_ILIM 0x5d +#define BD70528_REG_CHG_CHG_CURR_WARM 0x61 +#define BD70528_REG_CHG_CHG_CURR_COLD 0x62 + +/* Masks for main IRQ register bits */ +enum { + BD70528_INT_SHDN, +#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN) + BD70528_INT_PWR_FLT, +#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT) + BD70528_INT_VR_FLT, +#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT) + BD70528_INT_MISC, +#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC) + BD70528_INT_BAT1, +#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1) + BD70528_INT_RTC, +#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC) + BD70528_INT_GPIO, +#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO) + BD70528_INT_OP_FAIL, +#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL) +}; + +/* IRQs */ +enum { + /* Shutdown register IRQs */ + BD70528_INT_LONGPUSH, + BD70528_INT_WDT, + BD70528_INT_HWRESET, + BD70528_INT_RSTB_FAULT, + BD70528_INT_VBAT_UVLO, + BD70528_INT_TSD, + BD70528_INT_RSTIN, + /* Power failure register IRQs */ + BD70528_INT_BUCK1_FAULT, + BD70528_INT_BUCK2_FAULT, + BD70528_INT_BUCK3_FAULT, + BD70528_INT_LDO1_FAULT, + BD70528_INT_LDO2_FAULT, + BD70528_INT_LDO3_FAULT, + BD70528_INT_LED1_FAULT, + BD70528_INT_LED2_FAULT, + /* VR FAULT register IRQs */ + BD70528_INT_BUCK1_OCP, + BD70528_INT_BUCK2_OCP, + BD70528_INT_BUCK3_OCP, + BD70528_INT_LED1_OCP, + BD70528_INT_LED2_OCP, + BD70528_INT_BUCK1_FULLON, + BD70528_INT_BUCK2_FULLON, + /* PMU register interrupts */ + BD70528_INT_SHORTPUSH, + BD70528_INT_AUTO_WAKEUP, + BD70528_INT_STATE_CHANGE, + /* Charger 1 register IRQs */ + BD70528_INT_BAT_OV_RES, + BD70528_INT_BAT_OV_DET, + BD70528_INT_DBAT_DET, + BD70528_INT_BATTSD_COLD_RES, + 
BD70528_INT_BATTSD_COLD_DET, + BD70528_INT_BATTSD_HOT_RES, + BD70528_INT_BATTSD_HOT_DET, + BD70528_INT_CHG_TSD, + /* Charger 2 register IRQs */ + BD70528_INT_BAT_RMV, + BD70528_INT_BAT_DET, + BD70528_INT_DCIN2_OV_RES, + BD70528_INT_DCIN2_OV_DET, + BD70528_INT_DCIN2_RMV, + BD70528_INT_DCIN2_DET, + BD70528_INT_DCIN1_RMV, + BD70528_INT_DCIN1_DET, + /* RTC register IRQs */ + BD70528_INT_RTC_ALARM, + BD70528_INT_ELPS_TIM, + /* GPIO register IRQs */ + BD70528_INT_GPIO0, + BD70528_INT_GPIO1, + BD70528_INT_GPIO2, + BD70528_INT_GPIO3, + /* Invalid operation register IRQs */ + BD70528_INT_BUCK1_DVS_OPFAIL, + BD70528_INT_BUCK2_DVS_OPFAIL, + BD70528_INT_BUCK3_DVS_OPFAIL, + BD70528_INT_LED1_VOLT_OPFAIL, + BD70528_INT_LED2_VOLT_OPFAIL, +}; + +/* Masks */ +#define BD70528_INT_LONGPUSH_MASK 0x1 +#define BD70528_INT_WDT_MASK 0x2 +#define BD70528_INT_HWRESET_MASK 0x4 +#define BD70528_INT_RSTB_FAULT_MASK 0x8 +#define BD70528_INT_VBAT_UVLO_MASK 0x10 +#define BD70528_INT_TSD_MASK 0x20 +#define BD70528_INT_RSTIN_MASK 0x40 + +#define BD70528_INT_BUCK1_FAULT_MASK 0x1 +#define BD70528_INT_BUCK2_FAULT_MASK 0x2 +#define BD70528_INT_BUCK3_FAULT_MASK 0x4 +#define BD70528_INT_LDO1_FAULT_MASK 0x8 +#define BD70528_INT_LDO2_FAULT_MASK 0x10 +#define BD70528_INT_LDO3_FAULT_MASK 0x20 +#define BD70528_INT_LED1_FAULT_MASK 0x40 +#define BD70528_INT_LED2_FAULT_MASK 0x80 + +#define BD70528_INT_BUCK1_OCP_MASK 0x1 +#define BD70528_INT_BUCK2_OCP_MASK 0x2 +#define BD70528_INT_BUCK3_OCP_MASK 0x4 +#define BD70528_INT_LED1_OCP_MASK 0x8 +#define BD70528_INT_LED2_OCP_MASK 0x10 +#define BD70528_INT_BUCK1_FULLON_MASK 0x20 +#define BD70528_INT_BUCK2_FULLON_MASK 0x40 + +#define BD70528_INT_SHORTPUSH_MASK 0x1 +#define BD70528_INT_AUTO_WAKEUP_MASK 0x2 +#define BD70528_INT_STATE_CHANGE_MASK 0x10 + +#define BD70528_INT_BAT_OV_RES_MASK 0x1 +#define BD70528_INT_BAT_OV_DET_MASK 0x2 +#define BD70528_INT_DBAT_DET_MASK 0x4 +#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8 +#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10 +#define BD70528_INT_BATTSD_HOT_RES_MASK 0x20 +#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40 +#define BD70528_INT_CHG_TSD_MASK 0x80 + +#define BD70528_INT_BAT_RMV_MASK 0x1 +#define BD70528_INT_BAT_DET_MASK 0x2 +#define BD70528_INT_DCIN2_OV_RES_MASK 0x4 +#define BD70528_INT_DCIN2_OV_DET_MASK 0x8 +#define BD70528_INT_DCIN2_RMV_MASK 0x10 +#define BD70528_INT_DCIN2_DET_MASK 0x20 +#define BD70528_INT_DCIN1_RMV_MASK 0x40 +#define BD70528_INT_DCIN1_DET_MASK 0x80 + +#define BD70528_INT_RTC_ALARM_MASK 0x1 +#define BD70528_INT_ELPS_TIM_MASK 0x2 + +#define BD70528_INT_GPIO0_MASK 0x1 +#define BD70528_INT_GPIO1_MASK 0x2 +#define BD70528_INT_GPIO2_MASK 0x4 +#define BD70528_INT_GPIO3_MASK 0x8 + +#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1 +#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2 +#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4 +#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10 +#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20 + +#define BD70528_DEBOUNCE_MASK 0x3 + +#define BD70528_DEBOUNCE_DISABLE 0 +#define BD70528_DEBOUNCE_15MS 1 +#define BD70528_DEBOUNCE_30MS 2 +#define BD70528_DEBOUNCE_50MS 3 + +#define BD70528_GPIO_DRIVE_MASK 0x2 +#define BD70528_GPIO_PUSH_PULL 0x0 +#define BD70528_GPIO_OPEN_DRAIN 0x2 + +#define BD70528_GPIO_OUT_EN_MASK 0x80 +#define BD70528_GPIO_OUT_ENABLE 0x80 +#define BD70528_GPIO_OUT_DISABLE 0x0 + +#define BD70528_GPIO_OUT_HI 0x1 +#define BD70528_GPIO_OUT_LO 0x0 +#define BD70528_GPIO_OUT_MASK 0x1 + +#define BD70528_GPIO_IN_STATE_BASE 1 + +#define BD70528_CLK_OUT_EN_MASK 0x1 + +/* RTC masks to mask out reserved bits */ + +#define 
BD70528_MASK_RTC_SEC 0x7f +#define BD70528_MASK_RTC_MINUTE 0x7f +#define BD70528_MASK_RTC_HOUR_24H 0x80 +#define BD70528_MASK_RTC_HOUR_PM 0x20 +#define BD70528_MASK_RTC_HOUR 0x1f +#define BD70528_MASK_RTC_DAY 0x3f +#define BD70528_MASK_RTC_WEEK 0x07 +#define BD70528_MASK_RTC_MONTH 0x1f +#define BD70528_MASK_RTC_YEAR 0xff +#define BD70528_MASK_RTC_COUNT_L 0x7f + +#define BD70528_MASK_ELAPSED_TIMER_EN 0x1 +/* Mask second, min and hour fields + * HW would support ALM irq for over 24h + * (by setting day, month and year too) + * but as we wish to keep this same as for + * wake-up we limit ALM to 24H and only + * unmask sec, min and hour + */ +#define BD70528_MASK_ALM_EN 0x7 +#define BD70528_MASK_WAKE_EN 0x1 + +/* WDT masks */ +#define BD70528_MASK_WDT_EN 0x1 +#define BD70528_MASK_WDT_HOUR 0x1 +#define BD70528_MASK_WDT_MINUTE 0x7f +#define BD70528_MASK_WDT_SEC 0x7f + +#define BD70528_WDT_STATE_BIT 0x1 +#define BD70528_ELAPSED_STATE_BIT 0x2 +#define BD70528_WAKE_STATE_BIT 0x4 + +/* Charger masks */ +#define BD70528_MASK_CHG_STAT 0x7f +#define BD70528_MASK_CHG_BAT_TIMER 0x20 +#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10 +#define BD70528_MASK_CHG_BAT_DETECT 0x1 +#define BD70528_MASK_CHG_DCIN1_UVLO 0x1 +#define BD70528_MASK_CHG_DCIN_ILIM 0x3f +#define BD70528_MASK_CHG_CHG_CURR 0x1f +#define BD70528_MASK_CHG_TRICKLE_CURR 0x10 + +/* + * Note, external battery register is the lonely rider at + * address 0xc5. See how to stuff that in the regmap + */ +#define BD70528_MAX_REGISTER 0x94 + +/* Buck control masks */ +#define BD70528_MASK_RUN_EN 0x4 +#define BD70528_MASK_STBY_EN 0x2 +#define BD70528_MASK_IDLE_EN 0x1 +#define BD70528_MASK_LED1_EN 0x1 +#define BD70528_MASK_LED2_EN 0x10 + +#define BD70528_MASK_BUCK_VOLT 0xf +#define BD70528_MASK_LDO_VOLT 0x1f +#define BD70528_MASK_LED1_VOLT 0x1 +#define BD70528_MASK_LED2_VOLT 0x10 + +/* Misc irq masks */ +#define BD70528_INT_MASK_SHORT_PUSH 1 +#define BD70528_INT_MASK_AUTO_WAKE 2 +#define BD70528_INT_MASK_POWER_STATE 4 + +#define BD70528_MASK_BUCK_RAMP 0x10 +#define BD70528_SIFT_BUCK_RAMP 4 + +#if IS_ENABLED(CONFIG_BD70528_WATCHDOG) + +int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state); +void bd70528_wdt_lock(struct rohm_regmap_dev *data); +void bd70528_wdt_unlock(struct rohm_regmap_dev *data); + +#else /* CONFIG_BD70528_WATCHDOG */ + +static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, + int *old_state) +{ + return 0; +} + +static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data) +{ +} + +static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data) +{ +} + +#endif /* CONFIG_BD70528_WATCHDOG */ + +#endif /* __LINUX_MFD_BD70528_H__ */ diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index fd194bfc836f..7f2dbde402a1 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h @@ -4,15 +4,10 @@ #ifndef __LINUX_MFD_BD718XX_H__ #define __LINUX_MFD_BD718XX_H__ +#include <linux/mfd/rohm-generic.h> #include <linux/regmap.h> enum { - BD718XX_TYPE_BD71837 = 0, - BD718XX_TYPE_BD71847, - BD718XX_TYPE_AMOUNT -}; - -enum { BD718XX_BUCK1 = 0, BD718XX_BUCK2, BD718XX_BUCK3, @@ -321,18 +316,17 @@ enum { BD718XX_PWRBTN_LONG_PRESS_15S }; -struct bd718xx_clk; - struct bd718xx { - unsigned int chip_type; - struct device *dev; - struct regmap *regmap; - unsigned long int id; + /* + * Please keep this as the first member here as some + * drivers (clk) supporting more than one chip may only know this + * generic struct 'struct rohm_regmap_dev' and assume it is + * 
the first chunk of parent device's private data. + */ + struct rohm_regmap_dev chip; int chip_irq; struct regmap_irq_chip_data *irq_data; - - struct bd718xx_clk *clk; }; #endif /* __LINUX_MFD_BD718XX_H__ */ diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h new file mode 100644 index 000000000000..bff15ac26f2c --- /dev/null +++ b/include/linux/mfd/rohm-generic.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2018 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_ROHM_H__ +#define __LINUX_MFD_ROHM_H__ + +enum { + ROHM_CHIP_TYPE_BD71837 = 0, + ROHM_CHIP_TYPE_BD71847, + ROHM_CHIP_TYPE_BD70528, + ROHM_CHIP_TYPE_AMOUNT +}; + +struct rohm_regmap_dev { + unsigned int chip_type; + struct device *dev; + struct regmap *regmap; +}; + +#endif diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 3ca17eb89aa2..f1631a39acfc 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -20,6 +20,7 @@ #define MIN_850_MV 850000 #define MIN_800_MV 800000 #define MIN_750_MV 750000 +#define MIN_650_MV 650000 #define MIN_600_MV 600000 #define MIN_500_MV 500000 diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 6e7668a389a1..4805c90609c4 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -170,7 +170,9 @@ enum s2mps11_regulators { #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) #define S2MPS11_ENABLE_SHIFT 0x06 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) -#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) +#define S2MPS11_BUCK12346_N_VOLTAGES 153 +#define S2MPS11_BUCK5_N_VOLTAGES 216 +#define S2MPS11_BUCK7810_N_VOLTAGES 225 #define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ @@ -188,4 +190,9 @@ enum s2mps11_regulators { #define S2MPS11_BUCK6_RAMP_EN_SHIFT 0 #define S2MPS11_PMIC_EN_SHIFT 6 +/* + * Bits for "enable suspend" (On/Off controlled by PWREN) + * are the same as in S2MPS14: S2MPS14_ENABLE_SUSPEND + */ + #endif /* __LINUX_MFD_S2MPS11_H */ diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index d890595b89b6..3c67983678ec 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -5,7 +5,7 @@ */ #ifndef MFD_STMFX_H -#define MFX_STMFX_H +#define MFD_STMFX_H #include <linux/regmap.h> diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index f0273c9e972b..8cfda0554381 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -19,7 +19,6 @@ struct device_node; #ifdef CONFIG_MFD_SYSCON extern struct regmap *syscon_node_to_regmap(struct device_node *np); extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); -extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); extern struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property); @@ -34,11 +33,6 @@ static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) return ERR_PTR(-ENOTSUPP); } -static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) -{ - return ERR_PTR(-ENOTSUPP); -} - static inline struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property) diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h index 222cb14c5b0f..116a749e0302 100644 --- a/include/linux/mfd/ti-lmu-register.h +++ b/include/linux/mfd/ti-lmu-register.h @@ 
-187,47 +187,26 @@ #define LM3695_MAX_REG 0x14 -/* LM3697 */ -#define LM3697_REG_HVLED_OUTPUT_CFG 0x10 -#define LM3697_HVLED1_CFG_MASK BIT(0) -#define LM3697_HVLED2_CFG_MASK BIT(1) -#define LM3697_HVLED3_CFG_MASK BIT(2) -#define LM3697_HVLED1_CFG_SHIFT 0 -#define LM3697_HVLED2_CFG_SHIFT 1 -#define LM3697_HVLED3_CFG_SHIFT 2 +/* LM36274 */ +#define LM36274_REG_REV 0x01 +#define LM36274_REG_BL_CFG_1 0x02 +#define LM36274_REG_BL_CFG_2 0x03 +#define LM36274_REG_BRT_LSB 0x04 +#define LM36274_REG_BRT_MSB 0x05 +#define LM36274_REG_BL_EN 0x08 + +#define LM36274_REG_BIAS_CONFIG_1 0x09 +#define LM36274_EXT_EN_MASK BIT(0) +#define LM36274_EN_VNEG_MASK BIT(1) +#define LM36274_EN_VPOS_MASK BIT(2) + +#define LM36274_REG_BIAS_CONFIG_2 0x0a +#define LM36274_REG_BIAS_CONFIG_3 0x0b +#define LM36274_REG_VOUT_BOOST 0x0c +#define LM36274_REG_VOUT_POS 0x0d +#define LM36274_REG_VOUT_NEG 0x0e +#define LM36274_VOUT_MASK 0x3F + +#define LM36274_MAX_REG 0x13 -#define LM3697_REG_BL0_RAMP 0x11 -#define LM3697_REG_BL1_RAMP 0x12 -#define LM3697_RAMPUP_MASK 0xF0 -#define LM3697_RAMPUP_SHIFT 4 -#define LM3697_RAMPDN_MASK 0x0F -#define LM3697_RAMPDN_SHIFT 0 - -#define LM3697_REG_RAMP_CONF 0x14 -#define LM3697_RAMP_MASK 0x0F -#define LM3697_RAMP_EACH 0x05 - -#define LM3697_REG_PWM_CFG 0x1C -#define LM3697_PWM_A_MASK BIT(0) -#define LM3697_PWM_B_MASK BIT(1) - -#define LM3697_REG_IMAX_A 0x17 -#define LM3697_REG_IMAX_B 0x18 - -#define LM3697_REG_FEEDBACK_ENABLE 0x19 - -#define LM3697_REG_BRT_A_LSB 0x20 -#define LM3697_REG_BRT_A_MSB 0x21 -#define LM3697_REG_BRT_B_LSB 0x22 -#define LM3697_REG_BRT_B_MSB 0x23 - -#define LM3697_REG_ENABLE 0x24 - -#define LM3697_REG_OPEN_FAULT_STATUS 0xB0 - -#define LM3697_REG_SHORT_FAULT_STATUS 0xB2 - -#define LM3697_REG_MONITOR_ENABLE 0xB4 - -#define LM3697_MAX_REG 0xB4 #endif diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h index 7d1e9c24f818..0bc0e8199798 100644 --- a/include/linux/mfd/ti-lmu.h +++ b/include/linux/mfd/ti-lmu.h @@ -23,7 +23,7 @@ enum ti_lmu_id { LM3632, LM3633, LM3695, - LM3697, + LM36274, LMU_MAX_ID, }; @@ -65,6 +65,9 @@ enum lm363x_regulator_id { LM3632_BOOST, /* Boost output */ LM3632_LDO_POS, /* Positive display bias output */ LM3632_LDO_NEG, /* Negative display bias output */ + LM36274_BOOST, /* Boost output */ + LM36274_LDO_POS, /* Positive display bias output */ + LM36274_LDO_NEG, /* Negative display bias output */ }; /** diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 071cdf3e16cf..986986fe4e4e 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -47,7 +47,6 @@ struct wm831x_battery_pdata { * I2C or SPI buses. 
*/ struct wm831x_buckv_pdata { - int dvs_gpio; /** CPU GPIO to use for DVS switching */ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ int dvs_init_state; /** DVS state to expect on startup */ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index e13d9bf2f9a5..7f04754c7f2b 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, enum migrate_mode mode, - int extra_count); + struct page *newpage, struct page *page, int extra_count); #else static inline void putback_movable_pages(struct list_head *l) {} diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 70e7e5673ce9..5613e677a5f9 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -114,7 +114,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_ACCEL +#ifdef CONFIG_MLX5_FPGA_IPSEC u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 769326ea1d9b..40748fc1b11b 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -47,7 +47,7 @@ struct mlx5_core_cq { struct completion free; unsigned vector; unsigned int irqn; - void (*comp) (struct mlx5_core_cq *); + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void (*event) (struct mlx5_core_cq *, enum mlx5_event); u32 cons_index; unsigned arm_sn; @@ -55,7 +55,7 @@ struct mlx5_core_cq { int pid; struct { struct list_head list; - void (*comp)(struct mlx5_core_cq *); + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void *priv; } tasklet_ctx; int reset_notify_added; @@ -185,7 +185,7 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq) } int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *in, int inlen); + u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *out, int outlen); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index fc2b6e807f06..ce9839c8bc1a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -342,7 +342,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, - MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe, + MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, @@ -351,7 +351,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, - MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, + MLX5_EVENT_TYPE_MAX = 0x100, }; enum { @@ -437,6 +437,7 @@ enum { MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, + MLX5_OPCODE_DUMP = 0x23, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, @@ -445,6 +446,14 @@ enum { }; enum { + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, +}; + +enum { + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, +}; + +enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, MLX5_SET_PORT_NODE_GUID = 17, @@ -510,6 +519,10 @@ struct mlx5_cmd_layout { u8 status_own; }; +enum mlx5_fatal_assert_bit_offsets { + 
MLX5_RFR_OFFSET = 31, +}; + struct health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; @@ -518,12 +531,16 @@ struct health_buffer { __be32 rsvd1[2]; __be32 fw_ver; __be32 hw_id; - __be32 rsvd2; + __be32 rfr; u8 irisc_index; u8 synd; __be16 ext_synd; }; +enum mlx5_initializing_bit_offsets { + MLX5_FW_RESET_SUPPORTED_OFFSET = 30, +}; + enum mlx5_cmd_addr_l_sz_offset { MLX5_NIC_IFC_OFFSET = 8, }; @@ -1077,6 +1094,9 @@ enum mlx5_cap_type { MLX5_CAP_DEBUG, MLX5_CAP_RESERVED_14, MLX5_CAP_DEV_MEM, + MLX5_CAP_RESERVED_16, + MLX5_CAP_TLS, + MLX5_CAP_DEV_EVENT = 0x14, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1255,6 +1275,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP64_DEV_MEM(mdev, cap)\ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) +#define MLX5_CAP_TLS(mdev, cap) \ + MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) + +#define MLX5_CAP_DEV_EVENT(mdev, cap)\ + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5a27246db883..0e6da1840c7d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -41,7 +41,7 @@ #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> @@ -53,6 +53,7 @@ #include <linux/mlx5/eq.h> #include <linux/timecounter.h> #include <linux/ptp_clock_kernel.h> +#include <net/devlink.h> enum { MLX5_BOARD_ID_LEN = 64, @@ -107,6 +108,7 @@ enum { MLX5_REG_FPGA_CAP = 0x4022, MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_FPGA_ACCESS_REG = 0x4024, + MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -137,6 +139,7 @@ enum { MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, @@ -180,6 +183,11 @@ enum port_state_policy { MLX5_POLICY_INVALID = 0xffffffff }; +enum mlx5_coredev_type { + MLX5_COREDEV_PF, + MLX5_COREDEV_VF +}; + struct mlx5_field_desc { struct dentry *dent; int i; @@ -433,13 +441,18 @@ struct mlx5_core_health { struct timer_list timer; u32 prev; int miss_counter; - bool sick; + u8 synd; + u32 fatal_error; + u32 crdump_size; /* wq spinlock to synchronize draining */ spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; - struct work_struct work; + struct work_struct fatal_report_work; + struct work_struct report_work; struct delayed_work recover_work; + struct devlink_health_reporter *fw_reporter; + struct devlink_health_reporter *fw_fatal_reporter; }; struct mlx5_qp_table { @@ -451,13 +464,6 @@ struct mlx5_qp_table { struct radix_tree_root tree; }; -struct mlx5_mkey_table { - /* protect radix tree - */ - rwlock_t lock; - struct radix_tree_root tree; -}; - struct mlx5_vf_context { int enabled; u64 port_guid; @@ -468,7 +474,7 @@ struct mlx5_vf_context { struct mlx5_core_sriov { struct mlx5_vf_context *vfs_ctx; int num_vfs; - int enabled_vfs; + u16 max_vfs; }; struct mlx5_fc_stats { @@ -490,6 +496,7 @@ struct mlx5_eswitch; struct mlx5_lag; struct mlx5_devcom; struct mlx5_eq_table; +struct mlx5_irq_table; struct mlx5_rate_limit { u32 rate; @@ -519,6 +526,8 @@ struct mlx5_core_roce { }; struct mlx5_priv { + /* IRQ table valid only for real pci devices PF or VF */ + struct mlx5_irq_table *irq_table; struct 
mlx5_eq_table *eq_table; /* pages stuff */ @@ -541,9 +550,7 @@ struct mlx5_priv { struct dentry *cmdif_debugfs; /* end: qp staff */ - /* start: mkey staff */ - struct mlx5_mkey_table mkey_table; - /* end: mkey staff */ + struct xarray mkey_table; /* start: alloc staff */ /* protect buffer alocation according to numa node */ @@ -570,7 +577,6 @@ struct mlx5_priv { struct mlx5_core_sriov sriov; struct mlx5_lag *lag; struct mlx5_devcom *devcom; - unsigned long pci_dev_data; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; @@ -580,6 +586,7 @@ struct mlx5_priv { }; enum mlx5_device_state { + MLX5_DEVICE_STATE_UNINITIALIZED, MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; @@ -646,9 +653,11 @@ struct mlx5_clock { struct mlx5_fw_tracer; struct mlx5_vxlan; +struct mlx5_geneve; struct mlx5_core_dev { struct device *device; + enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; @@ -680,6 +689,7 @@ struct mlx5_core_dev { u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_vxlan *vxlan; + struct mlx5_geneve *geneve; struct { struct mlx5_rsvd_gids reserved_gids; u32 roce_en; @@ -690,6 +700,7 @@ struct mlx5_core_dev { struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; + u32 vsc_addr; }; struct mlx5_db { @@ -901,7 +912,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, @@ -1042,6 +1052,8 @@ int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); @@ -1082,9 +1094,9 @@ enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; -static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) +static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) { - return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); + return dev->coredev_type == MLX5_COREDEV_PF; } static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) @@ -1092,23 +1104,20 @@ static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) return dev->caps.embedded_cpu; } -static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev) +static inline bool +mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); } -static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev) +static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) { return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); } -#define MLX5_HOST_PF_MAX_VFS (127u) -static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev) +static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) { - if (mlx5_core_is_ecpf_esw_manager(dev)) - return 
MLX5_HOST_PF_MAX_VFS; - else - return pci_sriov_get_totalvfs(dev->pdev); + return dev->priv.sriov.max_vfs; } static inline int mlx5_get_gid_table_len(u16 param) diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h index 00045cc4ea11..e49d8c0d4f26 100644 --- a/include/linux/mlx5/eq.h +++ b/include/linux/mlx5/eq.h @@ -4,17 +4,7 @@ #ifndef MLX5_CORE_EQ_H #define MLX5_CORE_EQ_H -enum { - MLX5_EQ_PAGEREQ_IDX = 0, - MLX5_EQ_CMD_IDX = 1, - MLX5_EQ_ASYNC_IDX = 2, - /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */ - MLX5_EQ_PFAULT_IDX = 3, - MLX5_EQ_MAX_ASYNC_EQS, - /* completion eqs vector indices start here */ - MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS, -}; - +#define MLX5_IRQ_VEC_COMP_BASE 1 #define MLX5_NUM_CMD_EQE (32) #define MLX5_NUM_ASYNC_EQE (0x1000) #define MLX5_NUM_SPARE_EQE (0x80) @@ -23,18 +13,19 @@ struct mlx5_eq; struct mlx5_core_dev; struct mlx5_eq_param { - u8 index; + u8 irq_index; int nent; - u64 mask; - void *context; - irq_handler_t handler; + u64 mask[4]; }; struct mlx5_eq * -mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, - struct mlx5_eq_param *param); +mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param); int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb); +void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb); struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index cf226c190329..46b5ba029802 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -7,13 +7,14 @@ #define _MLX5_ESWITCH_ #include <linux/mlx5/driver.h> +#include <net/devlink.h> #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS + MLX5_ESWITCH_NONE, + MLX5_ESWITCH_LEGACY, + MLX5_ESWITCH_OFFLOADS }; enum { @@ -29,25 +30,29 @@ enum { }; struct mlx5_eswitch_rep; -struct mlx5_eswitch_rep_if { - int (*load)(struct mlx5_core_dev *dev, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch_rep *rep); - void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); - void *priv; - atomic_t state; +struct mlx5_eswitch_rep_ops { + int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); +}; + +struct mlx5_eswitch_rep_data { + void *priv; + atomic_t state; }; struct mlx5_eswitch_rep { - struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; + struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES]; u16 vport; u8 hw_id[ETH_ALEN]; u16 vlan; + /* Only IB rep is using vport_index */ + u16 vport_index; u32 vlan_refcount; }; void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep_if *rep_if, + const struct mlx5_eswitch_rep_ops *ops, u8 rep_type); void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type); void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, @@ -60,4 +65,35 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport_num, u32 sqn); + +u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); + +#ifdef CONFIG_MLX5_ESWITCH +enum devlink_eswitch_encap_mode 
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); + +bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); +u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + u16 vport_num); +#else /* CONFIG_MLX5_ESWITCH */ +static inline enum devlink_eswitch_encap_mode +mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) +{ + return DEVLINK_ESWITCH_ENCAP_MODE_NONE; +} + +static inline bool +mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) +{ + return false; +}; + +static inline u32 +mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + int vport_num) +{ + return 0; +}; +#endif /* CONFIG_MLX5_ESWITCH */ + #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e690ba0f965c..f049af3f3cd8 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -47,6 +47,7 @@ enum { enum { MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), + MLX5_FLOW_TABLE_TERMINATION = BIT(2), }; #define LEFTOVERS_RULE_NUM 2 @@ -87,10 +88,21 @@ struct mlx5_flow_group; struct mlx5_flow_namespace; struct mlx5_flow_handle; +enum { + FLOW_CONTEXT_HAS_TAG = BIT(0), +}; + +struct mlx5_flow_context { + u32 flags; + u32 flow_tag; + u32 flow_source; +}; + struct mlx5_flow_spec { u8 match_criteria_enable; u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; + struct mlx5_flow_context flow_context; }; enum { @@ -172,13 +184,11 @@ struct mlx5_fs_vlan { #define MLX5_FS_VLAN_DEPTH 2 enum { - FLOW_ACT_HAS_TAG = BIT(0), - FLOW_ACT_NO_APPEND = BIT(1), + FLOW_ACT_NO_APPEND = BIT(0), }; struct mlx5_flow_act { u32 action; - u32 flow_tag; u32 reformat_id; u32 modify_id; uintptr_t esp_id; @@ -189,7 +199,6 @@ struct mlx5_flow_act { #define MLX5_DECLARE_FLOW_ACT(name) \ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \ .reformat_id = 0, \ .modify_id = 0, \ .flags = 0, } @@ -199,7 +208,7 @@ struct mlx5_flow_act { */ struct mlx5_flow_handle * mlx5_add_flow_rules(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, + const struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, int num_dest); @@ -211,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5e74305e2e57..ec571fd7fcf8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -91,6 +91,20 @@ enum { enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, + MLX5_OBJ_TYPE_MKEY = 0xff01, + MLX5_OBJ_TYPE_QP = 0xff02, + MLX5_OBJ_TYPE_PSV = 0xff03, + MLX5_OBJ_TYPE_RMP = 0xff04, + MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, + MLX5_OBJ_TYPE_RQ = 0xff06, + MLX5_OBJ_TYPE_SQ = 0xff07, + MLX5_OBJ_TYPE_TIR = 0xff08, + MLX5_OBJ_TYPE_TIS = 0xff09, + MLX5_OBJ_TYPE_DCT = 0xff0a, + MLX5_OBJ_TYPE_XRQ = 0xff0b, + MLX5_OBJ_TYPE_RQT = 0xff0e, + MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, + MLX5_OBJ_TYPE_CQ = 0xff10, }; enum { @@ -106,6 +120,9 @@ enum { MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, 
MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, + MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, + MLX5_CMD_OP_ALLOC_SF = 0x113, + MLX5_CMD_OP_DEALLOC_SF = 0x114, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -155,7 +172,7 @@ enum { MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, - MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740, + MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -382,7 +399,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_and_modify_action[0x1]; u8 reserved_at_15[0x2]; u8 table_miss_action_domain[0x1]; - u8 reserved_at_18[0x8]; + u8 termination_table[0x1]; + u8 reserved_at_19[0x7]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -527,7 +545,21 @@ struct mlx5_ifc_fte_match_set_misc2_bits { struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; - u8 reserved_at_80[0x100]; + u8 metadata_reg_c_7[0x20]; + + u8 metadata_reg_c_6[0x20]; + + u8 metadata_reg_c_5[0x20]; + + u8 metadata_reg_c_4[0x20]; + + u8 metadata_reg_c_3[0x20]; + + u8 metadata_reg_c_2[0x20]; + + u8 metadata_reg_c_1[0x20]; + + u8 metadata_reg_c_0[0x20]; u8 metadata_reg_a[0x20]; @@ -635,8 +667,22 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 reserved_at_e00[0x7200]; }; +enum { + MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, + MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, + MLX5_FDB_TO_VPORT_REG_C_2 = 0x04, + MLX5_FDB_TO_VPORT_REG_C_3 = 0x08, + MLX5_FDB_TO_VPORT_REG_C_4 = 0x10, + MLX5_FDB_TO_VPORT_REG_C_5 = 0x20, + MLX5_FDB_TO_VPORT_REG_C_6 = 0x40, + MLX5_FDB_TO_VPORT_REG_C_7 = 0x80, +}; + struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 reserved_at_0[0x1a]; + u8 fdb_to_vport_reg_c_id[0x8]; + u8 reserved_at_8[0xf]; + u8 flow_source[0x1]; + u8 reserved_at_18[0x2]; u8 multi_fdb_encap[0x1]; u8 reserved_at_1b[0x1]; u8 fdb_multi_path_to_table[0x1]; @@ -664,7 +710,11 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x16]; + u8 reserved_at_5[0x3]; + u8 esw_uplink_ingress_acl[0x1]; + u8 reserved_at_9[0x10]; + u8 esw_functions_changed[0x1]; + u8 reserved_at_1a[0x1]; u8 ecpf_vport_exists[0x1]; u8 counter_eswitch_affinity[0x1]; u8 merged_eswitch[0x1]; @@ -680,7 +730,11 @@ struct mlx5_ifc_e_switch_cap_bits { u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; - u8 reserved_40[0x7c0]; + u8 reserved_at_40[0xb]; + u8 log_max_esw_sf[0x5]; + u8 esw_sf_base_id[0x10]; + + u8 reserved_at_60[0x7a0]; }; @@ -715,7 +769,9 @@ struct mlx5_ifc_qos_cap_bits { }; struct mlx5_ifc_debug_cap_bits { - u8 reserved_at_0[0x20]; + u8 core_dump_general[0x1]; + u8 core_dump_qp[0x1]; + u8 reserved_at_2[0x1e]; u8 reserved_at_20[0x2]; u8 stall_detect[0x1]; @@ -749,7 +805,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; - u8 reserved_at_23[0xd]; + u8 cqe_checksum_full[0x1]; + u8 reserved_at_24[0xc]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -818,6 +875,12 @@ struct mlx5_ifc_device_mem_cap_bits { u8 reserved_at_180[0x680]; }; +struct mlx5_ifc_device_event_cap_bits { + u8 user_affiliated_events[4][0x40]; + + u8 user_unaffiliated_events[4][0x40]; +}; + enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, @@ -911,6 
+974,16 @@ struct mlx5_ifc_vector_calc_cap_bits { u8 reserved_at_c0[0x720]; }; +struct mlx5_ifc_tls_cap_bits { + u8 tls_1_2_aes_gcm_128[0x1]; + u8 tls_1_3_aes_gcm_128[0x1]; + u8 tls_1_2_aes_gcm_256[0x1]; + u8 tls_1_3_aes_gcm_256[0x1]; + u8 reserved_at_4[0x1c]; + + u8 reserved_at_20[0x7e0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -975,7 +1048,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; - u8 reserved_at_90[0x8]; + u8 event_cap[0x1]; + u8 reserved_at_91[0x7]; u8 prio_tag_required[0x1]; u8 reserved_at_99[0x2]; u8 log_max_qp[0x5]; @@ -1023,7 +1097,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; - u8 reserved_at_165[0xa]; + u8 reserved_at_165[0x4]; + u8 rts2rts_qp_counters_set_id[0x1]; + u8 reserved_at_16a[0x5]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; @@ -1240,7 +1316,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_440[0x20]; - u8 reserved_at_460[0x3]; + u8 tls[0x1]; + u8 reserved_at_461[0x2]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x3]; u8 log_max_umem[0x5]; @@ -1265,7 +1342,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x3c]; + u8 reserved_at_580[0x33]; + u8 log_max_dek[0x5]; + u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128[0x1]; @@ -1295,13 +1374,26 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_640[0x10]; u8 num_q_monitor_counters[0x10]; - u8 reserved_at_660[0x40]; + u8 reserved_at_660[0x20]; + + u8 sf[0x1]; + u8 sf_set_partition[0x1]; + u8 reserved_at_682[0x1]; + u8 log_max_sf[0x5]; + u8 reserved_at_688[0x8]; + u8 log_min_sf_size[0x8]; + u8 max_num_sf_partitions[0x8]; u8 uctx_cap[0x20]; u8 reserved_at_6c0[0x4]; u8 flex_parser_id_geneve_tlv_option_0[0x4]; - u8 reserved_at_6c8[0x138]; + u8 reserved_at_6c8[0x28]; + u8 sf_base_id[0x10]; + + u8 reserved_at_700[0x80]; + u8 vhca_tunnel_commands[0x40]; + u8 reserved_at_7c0[0x40]; }; enum mlx5_flow_destination_type { @@ -2531,7 +2623,9 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_debug_cap_bits debug_cap; struct mlx5_ifc_fpga_cap_bits fpga_cap; + struct mlx5_ifc_tls_cap_bits tls_cap; u8 reserved_at_0[0x8000]; }; @@ -2549,6 +2643,12 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, }; +enum { + MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0, + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1, + MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2, +}; + struct mlx5_ifc_vlan_bits { u8 ethtype[0x10]; u8 prio[0x3]; @@ -2568,7 +2668,9 @@ struct mlx5_ifc_flow_context_bits { u8 action[0x10]; u8 extended_destination[0x1]; - u8 reserved_at_80[0x7]; + u8 reserved_at_81[0x1]; + u8 flow_source[0x2]; + u8 reserved_at_84[0x4]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; @@ -2663,7 +2765,8 @@ struct mlx5_ifc_traffic_counter_bits { struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; - u8 reserved_at_1[0x3]; + u8 tls_en[0x1]; + u8 reserved_at_1[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; @@ -2677,7 +2780,11 @@ struct mlx5_ifc_tisc_bits { u8 reserved_at_140[0x8]; u8 underlay_qpn[0x18]; - u8 reserved_at_160[0x3a0]; + + u8 reserved_at_160[0x8]; + u8 pd[0x18]; + + u8 reserved_at_180[0x380]; }; enum { @@ -3093,12 +3200,14 @@ struct mlx5_ifc_hca_vport_context_bits { }; struct 
mlx5_ifc_esw_vport_context_bits { - u8 reserved_at_0[0x3]; + u8 fdb_to_vport_reg_c[0x1]; + u8 reserved_at_1[0x2]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; - u8 reserved_at_8[0x18]; + u8 fdb_to_vport_reg_c_id[0x8]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x20]; @@ -4979,7 +5088,8 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits { }; struct mlx5_ifc_esw_vport_context_fields_select_bits { - u8 reserved_at_0[0x1c]; + u8 reserved_at_0[0x1b]; + u8 fdb_to_vport_reg_c_id[0x1]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; @@ -5176,6 +5286,7 @@ enum { MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -5864,10 +5975,12 @@ struct mlx5_ifc_modify_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x40]; + u8 reserved_at_280[0x60]; u8 cq_umem_valid[0x1]; - u8 reserved_at_2c1[0x5bf]; + u8 reserved_at_2e1[0x1f]; + + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; @@ -7236,7 +7349,8 @@ struct mlx5_ifc_create_flow_table_out_bits { struct mlx5_ifc_flow_table_context_bits { u8 reformat_en[0x1]; u8 decap_en[0x1]; - u8 reserved_at_2[0x2]; + u8 reserved_at_2[0x1]; + u8 termination_table[0x1]; u8 table_miss_action[0x4]; u8 level[0x8]; u8 reserved_at_10[0x8]; @@ -7355,9 +7469,9 @@ struct mlx5_ifc_create_eq_in_bits { u8 reserved_at_280[0x40]; - u8 event_bitmask[0x40]; + u8 event_bitmask[4][0x40]; - u8 reserved_at_300[0x580]; + u8 reserved_at_3c0[0x4c0]; u8 pas[0][0x40]; }; @@ -8475,7 +8589,7 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 mcda[0x1]; u8 mcc[0x1]; u8 mcqi[0x1]; - u8 reserved_at_1f[0x1]; + u8 mcqs[0x1]; u8 regs_95_to_87[0x9]; u8 mpegc[0x1]; @@ -8546,6 +8660,18 @@ struct mlx5_ifc_qcam_reg_bits { u8 reserved_at_1c0[0x80]; }; +struct mlx5_ifc_core_dump_reg_bits { + u8 reserved_at_0[0x18]; + u8 core_dump_type[0x8]; + + u8 reserved_at_20[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x8]; + u8 qpn[0x18]; + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; @@ -8955,6 +9081,24 @@ struct mlx5_ifc_mtppse_reg_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_mcqs_reg_bits { + u8 last_index_flag[0x1]; + u8 reserved_at_1[0x7]; + u8 fw_device[0x8]; + u8 component_index[0x10]; + + u8 reserved_at_20[0x10]; + u8 identifier[0x10]; + + u8 reserved_at_40[0x17]; + u8 component_status[0x5]; + u8 component_update_state[0x4]; + + u8 last_update_state_changer_type[0x4]; + u8 last_update_state_changer_host_id[0x4]; + u8 reserved_at_68[0x18]; +}; + struct mlx5_ifc_mcqi_cap_bits { u8 supported_info_bitmask[0x20]; @@ -8975,6 +9119,43 @@ struct mlx5_ifc_mcqi_cap_bits { u8 reserved_at_86[0x1a]; }; +struct mlx5_ifc_mcqi_version_bits { + u8 reserved_at_0[0x2]; + u8 build_time_valid[0x1]; + u8 user_defined_time_valid[0x1]; + u8 reserved_at_4[0x14]; + u8 version_string_length[0x8]; + + u8 version[0x20]; + + u8 build_time[0x40]; + + u8 user_defined_time[0x40]; + + u8 build_tool_version[0x20]; + + u8 reserved_at_e0[0x20]; + + u8 version_string[92][0x8]; +}; + +struct mlx5_ifc_mcqi_activation_method_bits { + u8 pending_server_ac_power_cycle[0x1]; + u8 pending_server_dc_power_cycle[0x1]; + u8 pending_server_reboot[0x1]; + u8 pending_fw_reset[0x1]; + u8 auto_activate[0x1]; + u8 all_hosts_sync[0x1]; + u8 device_hw_reset[0x1]; + u8 reserved_at_7[0x19]; +}; + +union 
mlx5_ifc_mcqi_reg_data_bits { + struct mlx5_ifc_mcqi_cap_bits mcqi_caps; + struct mlx5_ifc_mcqi_version_bits mcqi_version; + struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod; +}; + struct mlx5_ifc_mcqi_reg_bits { u8 read_pending_component[0x1]; u8 reserved_at_1[0xf]; @@ -8992,7 +9173,7 @@ struct mlx5_ifc_mcqi_reg_bits { u8 reserved_at_a0[0x10]; u8 data_size[0x10]; - u8 data[0][0x20]; + union mlx5_ifc_mcqi_reg_data_bits data[0]; }; struct mlx5_ifc_mcc_reg_bits { @@ -9518,7 +9699,7 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits { u8 opcode[0x10]; u8 uid[0x10]; - u8 reserved_at_20[0x10]; + u8 vhca_tunnel_id[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; @@ -9689,10 +9870,11 @@ struct mlx5_ifc_mtrc_ctrl_bits { struct mlx5_ifc_host_params_context_bits { u8 host_number[0x8]; - u8 reserved_at_8[0x8]; + u8 reserved_at_8[0x7]; + u8 host_pf_disabled[0x1]; u8 host_num_of_vfs[0x10]; - u8 reserved_at_20[0x10]; + u8 host_total_vfs[0x10]; u8 host_pci_bus[0x10]; u8 reserved_at_40[0x10]; @@ -9704,7 +9886,7 @@ struct mlx5_ifc_host_params_context_bits { u8 reserved_at_80[0x180]; }; -struct mlx5_ifc_query_host_params_in_bits { +struct mlx5_ifc_query_esw_functions_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -9714,7 +9896,7 @@ struct mlx5_ifc_query_host_params_in_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_query_host_params_out_bits { +struct mlx5_ifc_query_esw_functions_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -9725,6 +9907,165 @@ struct mlx5_ifc_query_host_params_out_bits { struct mlx5_ifc_host_params_context_bits host_params_context; u8 reserved_at_280[0x180]; + u8 host_sf_enable[0][0x40]; +}; + +struct mlx5_ifc_sf_partition_bits { + u8 reserved_at_0[0x10]; + u8 log_num_sf[0x8]; + u8 log_sf_bar_size[0x8]; +}; + +struct mlx5_ifc_query_sf_partitions_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x18]; + u8 num_sf_partitions[0x8]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_sf_partition_bits sf_partition[0]; +}; + +struct mlx5_ifc_query_sf_partitions_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_sf_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_sf_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_sf_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_sf_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_affiliated_event_header_bits { + u8 reserved_at_0[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; +}; + +enum { + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), +}; + +enum { + MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, +}; + +struct mlx5_ifc_encryption_key_obj_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x14]; + u8 key_size[0x4]; + u8 reserved_at_58[0x4]; + u8 key_type[0x4]; + + u8 reserved_at_60[0x8]; + u8 pd[0x18]; + + u8 reserved_at_80[0x180]; + u8 key[8][0x20]; + + u8 reserved_at_300[0x500]; +}; + +struct mlx5_ifc_create_encryption_key_in_bits { + struct 
mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; +}; + +enum { + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, +}; + +enum { + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, +}; + +struct mlx5_ifc_tls_static_params_bits { + u8 const_2[0x2]; + u8 tls_version[0x4]; + u8 const_1[0x2]; + u8 reserved_at_8[0x14]; + u8 encryption_standard[0x4]; + + u8 reserved_at_20[0x20]; + + u8 initial_record_number[0x40]; + + u8 resync_tcp_sn[0x20]; + + u8 gcm_iv[0x20]; + + u8 implicit_iv[0x40]; + + u8 reserved_at_100[0x8]; + u8 dek_index[0x18]; + + u8 reserved_at_120[0xe0]; +}; + +struct mlx5_ifc_tls_progress_params_bits { + u8 valid[0x1]; + u8 reserved_at_1[0x7]; + u8 pd[0x18]; + + u8 next_record_tcp_sn[0x20]; + + u8 hw_resync_tcp_sn[0x20]; + + u8 record_tracker_state[0x2]; + u8 auth_state[0x2]; + u8 reserved_at_64[0x4]; + u8 hw_offset_record_number[0x18]; }; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 3ba4edbd17a6..ae63b1ae9004 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -37,7 +37,8 @@ #include <linux/mlx5/driver.h> #define MLX5_INVALID_LKEY 0x100 -#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) +/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */ +#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 #define MLX5_CPY_GRD_MASK 0xc0 @@ -70,6 +71,7 @@ enum mlx5_qp_optpar { MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, MLX5_QP_OPTPAR_DC_HS = 1 << 20, MLX5_QP_OPTPAR_DC_KEY = 1 << 21, + MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25, }; enum mlx5_qp_state { @@ -202,7 +204,12 @@ struct mlx5_wqe_ctrl_seg { u8 signature; u8 rsvd[2]; u8 fm_ce_se; - __be32 imm; + union { + __be32 general_id; + __be32 imm; + __be32 umr_mkey; + __be32 tisn; + }; }; #define MLX5_WQE_CTRL_DS_MASK 0x3f @@ -551,11 +558,6 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); } -static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) -{ - return radix_tree_lookup(&dev->priv.mkey_table.tree, key); -} - int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *qp, u32 *in, int inlen, diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 3d1c6cdbbba7..16060fb9b5e5 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -44,9 +44,6 @@ MLX5_VPORT_UPLINK_PLACEHOLDER + \ MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) -#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \ - mlx5_core_max_vfs(mdev)) - #define MLX5_VPORT_MANAGER(mdev) \ (MLX5_CAP_GEN(mdev, vport_group_manager) && \ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ @@ -58,6 +55,7 @@ enum { MLX5_CAP_INLINE_MODE_NOT_REQUIRED, }; +/* Vport number for each function must keep unchanged */ enum { MLX5_VPORT_PF = 0x0, MLX5_VPORT_FIRST_VF = 0x1, @@ -69,7 +67,8 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 other_vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, u8 *addr); + u16 vport, bool other, u8 *addr); +int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr); int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline); 
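The ENCRYPTION_KEY general object and DEK key type introduced above are driven through the general-object command header (which also gains the vhca_tunnel_id field in this series). A rough sketch of how a driver might create such an object, assuming the pre-existing MLX5_SET()/MLX5_ADDR_OF()/mlx5_cmd_exec() helpers and the MLX5_CMD_OP_CREATE_GENERAL_OBJECT opcode; the function name and error handling are illustrative only:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int example_create_dek(struct mlx5_core_dev *mdev,
			      const void *key, u32 sz_bytes, u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *obj = MLX5_ADDR_OF(create_encryption_key_in, in,
				 encryption_key_object);
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
	MLX5_SET(encryption_key_obj, obj, key_size,
		 sz_bytes == 16 ?
		 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 :
		 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256);
	MLX5_SET(encryption_key_obj, obj, key_type,
		 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
	memcpy(MLX5_ADDR_OF(encryption_key_obj, obj, key), key, sz_bytes);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}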
void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); diff --git a/include/linux/mm.h b/include/linux/mm.h index dd0b5f4e1e45..0334ca97c584 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -541,13 +541,30 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) vma->vm_ops = NULL; } +static inline bool vma_is_anonymous(struct vm_area_struct *vma) +{ + return !vma->vm_ops; +} + +#ifdef CONFIG_SHMEM +/* + * The vma_is_shmem is not inline because it is used only by slow + * paths in userfault. + */ +bool vma_is_shmem(struct vm_area_struct *vma); +#else +static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } +#endif + +int vma_is_stack_for_current(struct vm_area_struct *vma); + /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } struct mmu_gather; struct inode; -#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) static inline int pmd_devmap(pmd_t pmd) { return 0; @@ -633,6 +650,11 @@ static inline bool is_vmalloc_addr(const void *x) return false; #endif } + +#ifndef is_ioremap_addr +#define is_ioremap_addr(x) is_vmalloc_addr(x) +#endif + #ifdef CONFIG_MMU extern int is_vmalloc_or_module_addr(const void *x); #else @@ -932,8 +954,6 @@ static inline bool is_zone_device_page(const struct page *page) #endif #ifdef CONFIG_DEV_PAGEMAP_OPS -void dev_pagemap_get_ops(void); -void dev_pagemap_put_ops(void); void __put_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); static inline bool put_devmap_managed_page(struct page *page) @@ -944,7 +964,6 @@ static inline bool put_devmap_managed_page(struct page *page) return false; switch (page->pgmap->type) { case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_PUBLIC: case MEMORY_DEVICE_FS_DAX: __put_devmap_managed_page(page); return true; @@ -954,60 +973,28 @@ static inline bool put_devmap_managed_page(struct page *page) return false; } -static inline bool is_device_private_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool is_device_public_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PUBLIC; -} - -#ifdef CONFIG_PCI_P2PDMA -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} -#else /* CONFIG_PCI_P2PDMA */ -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return false; -} -#endif /* CONFIG_PCI_P2PDMA */ - #else /* CONFIG_DEV_PAGEMAP_OPS */ -static inline void dev_pagemap_get_ops(void) -{ -} - -static inline void dev_pagemap_put_ops(void) -{ -} - static inline bool put_devmap_managed_page(struct page *page) { return false; } +#endif /* CONFIG_DEV_PAGEMAP_OPS */ static inline bool is_device_private_page(const struct page *page) { - return false; -} - -static inline bool is_device_public_page(const struct page *page) -{ - return false; + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; } static inline bool is_pci_p2pdma_page(const struct page *page) { - return false; + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_PCI_P2PDMA) && + is_zone_device_page(page) && + 
page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } -#endif /* CONFIG_DEV_PAGEMAP_OPS */ /* 127: arbitrary random number, small enough to assemble well */ #define page_ref_zero_or_close_to_overflow(page) \ @@ -1431,10 +1418,8 @@ struct zap_details { pgoff_t last_index; /* Highest page->index to unmap */ }; -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte, bool with_public_device); -#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false) - +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -1575,6 +1560,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); +int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); +int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, + struct task_struct *task, bool bypass_rlim); + /* Container for pinned pfns / pages */ struct frame_vector { unsigned int nr_allocated; /* Number of frames we have space for */ @@ -1648,23 +1637,6 @@ int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -static inline bool vma_is_anonymous(struct vm_area_struct *vma) -{ - return !vma->vm_ops; -} - -#ifdef CONFIG_SHMEM -/* - * The vma_is_shmem is not inline because it is used only by slow - * paths in userfault. - */ -bool vma_is_shmem(struct vm_area_struct *vma); -#else -static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } -#endif - -int vma_is_stack_for_current(struct vm_area_struct *vma); - extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, @@ -1782,7 +1754,7 @@ static inline void sync_mm_rss(struct mm_struct *mm) } #endif -#ifndef __HAVE_ARCH_PTE_DEVMAP +#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { return 0; @@ -2681,8 +2653,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) return 0; } -typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, - void *data); +typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); @@ -2696,11 +2667,42 @@ static inline void kernel_poison_pages(struct page *page, int numpages, int enable) { } #endif -extern bool _debug_pagealloc_enabled; +#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON +DECLARE_STATIC_KEY_TRUE(init_on_alloc); +#else +DECLARE_STATIC_KEY_FALSE(init_on_alloc); +#endif +static inline bool want_init_on_alloc(gfp_t flags) +{ + if (static_branch_unlikely(&init_on_alloc) && + !page_poisoning_enabled()) + return true; + return flags & __GFP_ZERO; +} + +#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON +DECLARE_STATIC_KEY_TRUE(init_on_free); +#else +DECLARE_STATIC_KEY_FALSE(init_on_free); +#endif +static inline bool want_init_on_free(void) +{ + return static_branch_unlikely(&init_on_free) && + !page_poisoning_enabled(); +} + +#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT +DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); +#else +DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +#endif static inline bool debug_pagealloc_enabled(void) { - return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && 
_debug_pagealloc_enabled; + if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) + return false; + + return static_branch_unlikely(&_debug_pagealloc_enabled); } #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) @@ -2756,11 +2758,17 @@ extern int randomize_va_space; #endif const char * arch_vma_name(struct vm_area_struct *vma); +#ifdef CONFIG_MMU void print_vma_addr(char *prefix, unsigned long rip); +#else +static inline void print_vma_addr(char *prefix, unsigned long rip) +{ +} +#endif void *sparse_buffer_alloc(unsigned long size); -struct page *sparse_mem_map_populate(unsigned long pnum, int nid, - struct vmem_altmap *altmap); +struct page * __populate_section_memmap(unsigned long pfn, + unsigned long nr_pages, int nid, struct vmem_altmap *altmap); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -2850,11 +2858,9 @@ extern long copy_huge_page_from_user(struct page *dst_page, bool allow_pagefault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -extern struct page_ext_operations debug_guardpage_ops; - #ifdef CONFIG_DEBUG_PAGEALLOC extern unsigned int _debug_guardpage_minorder; -extern bool _debug_guardpage_enabled; +DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); static inline unsigned int debug_guardpage_minorder(void) { @@ -2863,21 +2869,15 @@ static inline unsigned int debug_guardpage_minorder(void) static inline bool debug_guardpage_enabled(void) { - return _debug_guardpage_enabled; + return static_branch_unlikely(&_debug_guardpage_enabled); } static inline bool page_is_guard(struct page *page) { - struct page_ext *page_ext; - if (!debug_guardpage_enabled()) return false; - page_ext = lookup_page_ext(page); - if (unlikely(!page_ext)) - return false; - - return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); + return PageGuard(page); } #else static inline unsigned int debug_guardpage_minorder(void) { return 0; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8ec38b11b361..3a37a89eb7a7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -158,7 +158,7 @@ struct page { struct { /* ZONE_DEVICE pages */ /** @pgmap: Points to the hosting device page map. */ struct dev_pagemap *pgmap; - unsigned long hmm_data; + void *zone_device_data; unsigned long _zd_pad_1; /* uses mapping */ }; @@ -329,7 +329,9 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). 
*/ void * vm_private_data; /* was vm_pte (shared mem) */ +#ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; +#endif #ifndef CONFIG_MMU struct vm_region *vm_region; /* NOMMU mapping region */ #endif @@ -501,7 +503,7 @@ struct mm_struct { #endif struct work_struct async_put_work; -#if IS_ENABLED(CONFIG_HMM) +#ifdef CONFIG_HMM_MIRROR /* HMM needs to track a few things per mm */ struct hmm *hmm; #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7ac3755444d3..4a351cb7f20f 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -501,7 +501,6 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } -void sdio_run_irqs(struct mmc_host *host); void sdio_signal_irq(struct mmc_host *host); #ifdef CONFIG_REGULATOR diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 70394cabaf4e..d77d717c620c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -855,18 +855,6 @@ static inline int local_memory_node(int node_id) { return node_id; }; */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -#ifdef CONFIG_ZONE_DEVICE -static inline bool is_dev_zone(const struct zone *zone) -{ - return zone_idx(zone) == ZONE_DEVICE; -} -#else -static inline bool is_dev_zone(const struct zone *zone) -{ - return false; -} -#endif - /* * Returns true if a zone has pages managed by the buddy allocator. * All the reclaim decisions have to use this function rather than @@ -1160,6 +1148,29 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) +#define SUBSECTION_SHIFT 21 + +#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) +#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) +#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) + +#if SUBSECTION_SHIFT > SECTION_SIZE_BITS +#error Subsection size exceeds section size +#else +#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) +#endif + +#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION) +#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) + +struct mem_section_usage { + DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); + /* See declaration of similar field in struct zone */ + unsigned long pageblock_flags[0]; +}; + +void subsection_map_init(unsigned long pfn, unsigned long nr_pages); + struct page; struct page_ext; struct mem_section { @@ -1177,8 +1188,7 @@ struct mem_section { */ unsigned long section_mem_map; - /* See declaration of similar field in struct zone */ - unsigned long *pageblock_flags; + struct mem_section_usage *usage; #ifdef CONFIG_PAGE_EXTENSION /* * If SPARSEMEM, pgdat doesn't have page_ext pointer. 
We use @@ -1209,6 +1219,11 @@ extern struct mem_section **mem_section; extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif +static inline unsigned long *section_to_usemap(struct mem_section *ms) +{ + return ms->usage->pageblock_flags; +} + static inline struct mem_section *__nr_to_section(unsigned long nr) { #ifdef CONFIG_SPARSEMEM_EXTREME @@ -1219,8 +1234,8 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } -extern int __section_nr(struct mem_section* ms); -extern unsigned long usemap_size(void); +extern unsigned long __section_nr(struct mem_section *ms); +extern size_t mem_section_usage_size(void); /* * We use the lower bits of the mem_map pointer to store @@ -1238,7 +1253,8 @@ extern unsigned long usemap_size(void); #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_MAP_LAST_BIT (1UL<<3) +#define SECTION_IS_EARLY (1UL<<3) +#define SECTION_MAP_LAST_BIT (1UL<<4) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) #define SECTION_NID_SHIFT 3 @@ -1264,6 +1280,11 @@ static inline int valid_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); } +static inline int early_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_IS_EARLY)); +} + static inline int valid_section_nr(unsigned long nr) { return valid_section(__nr_to_section(nr)); @@ -1291,14 +1312,42 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn) return __nr_to_section(pfn_to_section_nr(pfn)); } -extern int __highest_present_section_nr; +extern unsigned long __highest_present_section_nr; + +static inline int subsection_map_index(unsigned long pfn) +{ + return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +{ + int idx = subsection_map_index(pfn); + + return test_bit(idx, ms->usage->subsection_map); +} +#else +static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +{ + return 1; +} +#endif #ifndef CONFIG_HAVE_ARCH_PFN_VALID static inline int pfn_valid(unsigned long pfn) { + struct mem_section *ms; + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); + ms = __nr_to_section(pfn_to_section_nr(pfn)); + if (!valid_section(ms)) + return 0; + /* + * Traditionally early sections always returned pfn_valid() for + * the entire section-sized span. 
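For scale: with 4 KiB pages (PAGE_SHIFT = 12), SUBSECTION_SHIFT = 21 makes each subsection 2 MiB, i.e. PAGES_PER_SUBSECTION = 512. On x86-64, where SECTION_SIZE_BITS is 27 (128 MiB sections), that gives SUBSECTIONS_PER_SECTION = 64, so the new subsection_map in struct mem_section_usage fits in a single 64-bit word per section.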
+ */ + return early_section(ms) || pfn_section_valid(ms, pfn); } #endif @@ -1330,6 +1379,7 @@ void sparse_init(void); #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) #define pfn_present pfn_valid +#define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ /* diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 448621c32e4d..5714fd35a83c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -16,6 +16,25 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) +/** + * struct pci_device_id - PCI device ID structure + * @vendor: Vendor ID to match (or PCI_ANY_ID) + * @device: Device ID to match (or PCI_ANY_ID) + * @subvendor: Subsystem vendor ID to match (or PCI_ANY_ID) + * @subdevice: Subsystem device ID to match (or PCI_ANY_ID) + * @class: Device class, subclass, and "interface" to match. + * See Appendix D of the PCI Local Bus Spec or + * include/linux/pci_ids.h for a full list of classes. + * Most drivers do not need to specify class/class_mask + * as vendor/device is normally sufficient. + * @class_mask: Limit which sub-fields of the class field are compared. + * See drivers/scsi/sym53c8xx_2/ for example of usage. + * @driver_data: Data private to the driver. + * Most drivers don't need to use driver_data field. + * Best practice is to use driver_data as an index + * into a static list of equivalent device types, + * instead of using it as a pointer. + */ struct pci_device_id { __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ @@ -257,17 +276,17 @@ struct pcmcia_device_id { __u16 match_flags; __u16 manf_id; - __u16 card_id; + __u16 card_id; - __u8 func_id; + __u8 func_id; /* for real multi-function devices */ - __u8 function; + __u8 function; /* for pseudo multi-function devices */ - __u8 device_no; + __u8 device_no; - __u32 prod_id_hash[4]; + __u32 prod_id_hash[4]; /* not matched against in kernelspace */ const char * prod_id[4]; @@ -795,9 +814,11 @@ struct tee_client_device_id { /** * struct wmi_device_id - WMI device identifier * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba + * @context: pointer to driver specific data */ struct wmi_device_id { const char guid_string[UUID_STRING_LEN+1]; + const void *context; }; #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 188998d3dca9..1455812dd325 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -21,6 +21,7 @@ #include <linux/rbtree_latch.h> #include <linux/error-injection.h> #include <linux/tracepoint-defs.h> +#include <linux/srcu.h> #include <linux/percpu.h> #include <asm/module.h> @@ -450,6 +451,10 @@ struct module { unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif +#ifdef CONFIG_TREE_SRCU + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; +#endif #ifdef CONFIG_BPF_EVENTS unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 31013c2effd3..5229c18025e9 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -29,6 +29,11 @@ void *module_alloc(unsigned long size); /* Free memory returned from module_alloc. 
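The new struct pci_device_id kernel-doc above recommends treating driver_data as an index into a static table of board variants rather than as a pointer. A minimal, self-contained illustration of that pattern; the vendor/device IDs, board data and function names are invented for the example:

#include <linux/module.h>
#include <linux/pci.h>

enum example_board { BOARD_A, BOARD_B };

static const struct example_board_info {
	const char *name;
	int num_ports;
} example_boards[] = {
	[BOARD_A] = { .name = "Example A", .num_ports = 2 },
	[BOARD_B] = { .name = "Example B", .num_ports = 4 },
};

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x1b36, 0x0001), .driver_data = BOARD_A },
	{ PCI_DEVICE(0x1b36, 0x0002), .driver_data = BOARD_B },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct example_board_info *info = &example_boards[id->driver_data];

	/* driver_data is used purely as an index, never dereferenced */
	dev_info(&pdev->dev, "%s with %d ports\n", info->name, info->num_ports);
	return 0;
}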
*/ void module_memfree(void *module_region); +/* Determines if the section name is an exit section (that is only used during + * module unloading) + */ +bool module_exit_section(const char *name); + /* * Apply the given relocation to the (simplified) ELF. Return -error * or 0. diff --git a/include/linux/msi.h b/include/linux/msi.h index d48e919d55ae..8ad679e9d9c0 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -64,6 +64,10 @@ struct ti_sci_inta_msi_desc { * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor * + * @write_msi_msg: Callback that may be called when the MSI message + * address or data changes + * @write_msi_msg_data: Data parameter for the callback. + * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated @@ -90,6 +94,9 @@ struct msi_desc { const void *iommu_cookie; #endif + void (*write_msi_msg)(struct msi_desc *entry, void *data); + void *write_msi_msg_data; + union { /* PCI MSI/X specific data */ struct { @@ -100,6 +107,7 @@ struct msi_desc { u8 multi_cap : 3; u8 maskbit : 1; u8 is_64 : 1; + u8 is_virtual : 1; u16 entry_nr; unsigned default_irq; } msi_attrib; diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 208c87cf2e3e..c98a21108688 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -219,6 +219,13 @@ struct cfi_pri_amdstd { uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; + /* Below field are added from version 1.5 */ + uint8_t ProgramSuspend; + uint8_t UnlockBypass; + uint8_t SecureSiliconSector; + uint8_t SoftwareFeatures; +#define CFI_POLL_STATUS_REG BIT(0) +#define CFI_POLL_DQ BIT(1) } __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h new file mode 100644 index 000000000000..2dfe65964f6e --- /dev/null +++ b/include/linux/mtd/hyperbus.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef __LINUX_MTD_HYPERBUS_H__ +#define __LINUX_MTD_HYPERBUS_H__ + +#include <linux/mtd/map.h> + +enum hyperbus_memtype { + HYPERFLASH, + HYPERRAM, +}; + +/** + * struct hyperbus_device - struct representing HyperBus slave device + * @map: map_info struct for accessing MMIO HyperBus flash memory + * @np: pointer to HyperBus slave device node + * @mtd: pointer to MTD struct + * @ctlr: pointer to HyperBus controller struct + * @memtype: type of memory device: HyperFlash or HyperRAM + */ + +struct hyperbus_device { + struct map_info map; + struct device_node *np; + struct mtd_info *mtd; + struct hyperbus_ctlr *ctlr; + enum hyperbus_memtype memtype; +}; + +/** + * struct hyperbus_ops - struct representing custom HyperBus operations + * @read16: read 16 bit of data from flash in a single burst. Used to read + * from non default address space, such as ID/CFI space + * @write16: write 16 bit of data to flash in a single burst. Used to + * send cmd to flash or write single 16 bit word at a time. 
+ * @copy_from: copy data from flash memory + * @copy_to: copy data to flash memory + * @calibrate: calibrate HyperBus controller + */ + +struct hyperbus_ops { + u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr); + void (*write16)(struct hyperbus_device *hbdev, + unsigned long addr, u16 val); + void (*copy_from)(struct hyperbus_device *hbdev, void *to, + unsigned long from, ssize_t len); + void (*copy_to)(struct hyperbus_device *dev, unsigned long to, + const void *from, ssize_t len); + int (*calibrate)(struct hyperbus_device *dev); +}; + +/** + * struct hyperbus_ctlr - struct representing HyperBus controller + * @dev: pointer to HyperBus controller device + * @calibrated: flag to indicate ctlr calibration sequence is complete + * @ops: HyperBus controller ops + */ +struct hyperbus_ctlr { + struct device *dev; + bool calibrated; + + const struct hyperbus_ops *ops; +}; + +/** + * hyperbus_register_device - probe and register a HyperBus slave memory device + * @hbdev: hyperbus_device struct with dev, np and ctlr field populated + * + * Return: 0 for success, others for failure. + */ +int hyperbus_register_device(struct hyperbus_device *hbdev); + +/** + * hyperbus_unregister_device - deregister HyperBus slave memory device + * @hbdev: hyperbus_device to be unregistered + * + * Return: 0 for success, others for failure. + */ +int hyperbus_unregister_device(struct hyperbus_device *hbdev); + +#endif /* __LINUX_MTD_HYPERBUS_H__ */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 936a3fdb48b5..4ca8c1c845fb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -316,6 +316,12 @@ struct mtd_info { int (*_get_device) (struct mtd_info *mtd); void (*_put_device) (struct mtd_info *mtd); + /* + * flag indicates a panic write, low level drivers can take appropriate + * action if required to ensure writes go through + */ + bool oops_panic_write; + struct notifier_block reboot_notifier; /* default mode before reboot */ /* ECC status information */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 2d12a1b18742..5f728407a579 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -77,6 +77,7 @@ #define ONENAND_DEVICE_DENSITY_1Gb (0x003) #define ONENAND_DEVICE_DENSITY_2Gb (0x004) #define ONENAND_DEVICE_DENSITY_4Gb (0x005) +#define ONENAND_DEVICE_DENSITY_8Gb (0x006) /* * Version ID Register F002h (R) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index ac3884a28dea..4ab9bccfcde0 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -874,6 +874,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); +static inline void nand_op_trace(const char *prefix, + const struct nand_op_instr *instr) +{ +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) + switch (instr->type) { + case NAND_OP_CMD_INSTR: + pr_debug("%sCMD [0x%02x]\n", prefix, + instr->ctx.cmd.opcode); + break; + case NAND_OP_ADDR_INSTR: + pr_debug("%sADDR [%d cyc: %*ph]\n", prefix, + instr->ctx.addr.naddrs, + instr->ctx.addr.naddrs < 64 ? + instr->ctx.addr.naddrs : 64, + instr->ctx.addr.addrs); + break; + case NAND_OP_DATA_IN_INSTR: + pr_debug("%sDATA_IN [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? + ", force 8-bit" : ""); + break; + case NAND_OP_DATA_OUT_INSTR: + pr_debug("%sDATA_OUT [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? 
+ ", force 8-bit" : ""); + break; + case NAND_OP_WAITRDY_INSTR: + pr_debug("%sWAITRDY [max %d ms]\n", prefix, + instr->ctx.waitrdy.timeout_ms); + break; + } +#endif +} + /** * struct nand_controller_ops - Controller operations * diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 507f7e289bd1..4ea558bd3c46 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -68,30 +68,60 @@ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) +#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 2), \ + SPI_MEM_OP_DUMMY(ndummy, 2), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 4), \ + SPI_MEM_OP_DUMMY(ndummy, 4), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PROG_EXEC_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ @@ -197,6 +227,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer paragon_spinand_manufacturer; extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; @@ -260,7 +291,7 @@ struct spinand_ecc_info { */ struct spinand_info { const char *model; - u8 devid; + u16 devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; @@ -422,7 +453,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand, int spinand_match_and_init(struct spinand_device *dev, const struct spinand_info *table, - unsigned int table_size, u8 devid); + unsigned int table_size, u16 devid); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct 
spinand_device *spinand, unsigned int target); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3093dd162424..dcd03fee6e01 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -151,7 +151,7 @@ static inline bool mutex_is_locked(struct mutex *lock) /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/locking/mutex-design.txt. + * Also see Documentation/locking/mutex-design.rst. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 4471cf96ef69..47e5679b48e1 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -918,52 +918,6 @@ extern void mv64340_irq_init(unsigned int base); -/* MPSC Platform Device, Driver Data (Shared register regions) */ -#define MPSC_SHARED_NAME "mpsc_shared" - -#define MPSC_ROUTING_BASE_ORDER 0 -#define MPSC_SDMA_INTR_BASE_ORDER 1 - -#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c -#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084 - -struct mpsc_shared_pdata { - u32 mrr_val; - u32 rcrr_val; - u32 tcrr_val; - u32 intr_cause_val; - u32 intr_mask_val; -}; - -/* MPSC Platform Device, Driver Data */ -#define MPSC_CTLR_NAME "mpsc" - -#define MPSC_BASE_ORDER 0 -#define MPSC_SDMA_BASE_ORDER 1 -#define MPSC_BRG_BASE_ORDER 2 - -#define MPSC_REG_BLOCK_SIZE 0x0038 -#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18 -#define MPSC_BRG_REG_BLOCK_SIZE 0x0008 - -struct mpsc_pdata { - u8 mirror_regs; - u8 cache_mgmt; - u8 max_idle; - int default_baud; - int default_bits; - int default_parity; - int default_flow; - u32 chr_1_val; - u32 chr_2_val; - u32 chr_10_val; - u32 mpcr_val; - u32 bcr_val; - u8 brg_can_tune; - u8 brg_clk_src; - u32 brg_clk_freq; -}; - /* Watchdog Platform Device, Driver Data */ #define MV64x60_WDT_NAME "mv64x60_wdt" diff --git a/include/linux/net.h b/include/linux/net.h index f7d672cf25b5..9cafb5f353a9 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -116,11 +116,11 @@ struct socket { unsigned long flags; - struct socket_wq *wq; - struct file *file; struct sock *sk; const struct proto_ops *ops; + + struct socket_wq wq; }; struct vm_area_struct; diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h deleted file mode 100644 index fd458389f7d1..000000000000 --- a/include/linux/net_dim.h +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef NET_DIM_H -#define NET_DIM_H - -#include <linux/module.h> - -struct net_dim_cq_moder { - u16 usec; - u16 pkts; - u8 cq_period_mode; -}; - -struct net_dim_sample { - ktime_t time; - u32 pkt_ctr; - u32 byte_ctr; - u16 event_ctr; -}; - -struct net_dim_stats { - int ppms; /* packets per msec */ - int bpms; /* bytes per msec */ - int epms; /* events per msec */ -}; - -struct net_dim { /* Adaptive Moderation */ - u8 state; - struct net_dim_stats prev_stats; - struct net_dim_sample start_sample; - struct work_struct work; - u8 profile_ix; - u8 mode; - u8 tune_state; - u8 steps_right; - u8 steps_left; - u8 tired; -}; - -enum { - NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, - NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, - NET_DIM_CQ_PERIOD_NUM_MODES -}; - -/* Adaptive moderation logic */ -enum { - NET_DIM_START_MEASURE, - NET_DIM_MEASURE_IN_PROGRESS, - NET_DIM_APPLY_NEW_PROFILE, -}; - -enum { - NET_DIM_PARKING_ON_TOP, - NET_DIM_PARKING_TIRED, - NET_DIM_GOING_RIGHT, - NET_DIM_GOING_LEFT, -}; - -enum { - NET_DIM_STATS_WORSE, - NET_DIM_STATS_SAME, - NET_DIM_STATS_BETTER, -}; - -enum { - NET_DIM_STEPPED, - NET_DIM_TOO_TIRED, - NET_DIM_ON_EDGE, -}; - -#define NET_DIM_PARAMS_NUM_PROFILES 5 -/* Adaptive moderation profiles */ -#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 -#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128 -#define NET_DIM_DEF_PROFILE_CQE 1 -#define NET_DIM_DEF_PROFILE_EQE 1 - -/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ -#define NET_DIM_RX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ -} - -#define NET_DIM_RX_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ -} - -#define NET_DIM_TX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ -} - -#define NET_DIM_TX_CQE_PROFILES { \ - {5, 128}, \ - {8, 64}, \ - {16, 32}, \ - {32, 32}, \ - {64, 32} \ -} - -static const struct net_dim_cq_moder -rx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { - NET_DIM_RX_EQE_PROFILES, - NET_DIM_RX_CQE_PROFILES, -}; - -static const struct net_dim_cq_moder -tx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { - NET_DIM_TX_EQE_PROFILES, - NET_DIM_TX_CQE_PROFILES, -}; - -static inline struct net_dim_cq_moder -net_dim_get_rx_moderation(u8 cq_period_mode, int ix) -{ - struct net_dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix]; - - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -static inline struct net_dim_cq_moder -net_dim_get_def_rx_moderation(u8 cq_period_mode) -{ - 
u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? - NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; - - return net_dim_get_rx_moderation(cq_period_mode, profile_ix); -} - -static inline struct net_dim_cq_moder -net_dim_get_tx_moderation(u8 cq_period_mode, int ix) -{ - struct net_dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix]; - - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -static inline struct net_dim_cq_moder -net_dim_get_def_tx_moderation(u8 cq_period_mode) -{ - u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? - NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; - - return net_dim_get_tx_moderation(cq_period_mode, profile_ix); -} - -static inline bool net_dim_on_top(struct net_dim *dim) -{ - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - return true; - case NET_DIM_GOING_RIGHT: - return (dim->steps_left > 1) && (dim->steps_right == 1); - default: /* NET_DIM_GOING_LEFT */ - return (dim->steps_right > 1) && (dim->steps_left == 1); - } -} - -static inline void net_dim_turn(struct net_dim *dim) -{ - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - break; - case NET_DIM_GOING_RIGHT: - dim->tune_state = NET_DIM_GOING_LEFT; - dim->steps_left = 0; - break; - case NET_DIM_GOING_LEFT: - dim->tune_state = NET_DIM_GOING_RIGHT; - dim->steps_right = 0; - break; - } -} - -static inline int net_dim_step(struct net_dim *dim) -{ - if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) - return NET_DIM_TOO_TIRED; - - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - break; - case NET_DIM_GOING_RIGHT: - if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) - return NET_DIM_ON_EDGE; - dim->profile_ix++; - dim->steps_right++; - break; - case NET_DIM_GOING_LEFT: - if (dim->profile_ix == 0) - return NET_DIM_ON_EDGE; - dim->profile_ix--; - dim->steps_left++; - break; - } - - dim->tired++; - return NET_DIM_STEPPED; -} - -static inline void net_dim_park_on_top(struct net_dim *dim) -{ - dim->steps_right = 0; - dim->steps_left = 0; - dim->tired = 0; - dim->tune_state = NET_DIM_PARKING_ON_TOP; -} - -static inline void net_dim_park_tired(struct net_dim *dim) -{ - dim->steps_right = 0; - dim->steps_left = 0; - dim->tune_state = NET_DIM_PARKING_TIRED; -} - -static inline void net_dim_exit_parking(struct net_dim *dim) -{ - dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : - NET_DIM_GOING_RIGHT; - net_dim_step(dim); -} - -#define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ - -static inline int net_dim_stats_compare(struct net_dim_stats *curr, - struct net_dim_stats *prev) -{ - if (!prev->bpms) - return curr->bpms ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) - return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - if (!prev->ppms) - return curr->ppms ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) - return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - if (!prev->epms) - return NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) - return (curr->epms < prev->epms) ? 
NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - return NET_DIM_STATS_SAME; -} - -static inline bool net_dim_decision(struct net_dim_stats *curr_stats, - struct net_dim *dim) -{ - int prev_state = dim->tune_state; - int prev_ix = dim->profile_ix; - int stats_res; - int step_res; - - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); - if (stats_res != NET_DIM_STATS_SAME) - net_dim_exit_parking(dim); - break; - - case NET_DIM_PARKING_TIRED: - dim->tired--; - if (!dim->tired) - net_dim_exit_parking(dim); - break; - - case NET_DIM_GOING_RIGHT: - case NET_DIM_GOING_LEFT: - stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); - if (stats_res != NET_DIM_STATS_BETTER) - net_dim_turn(dim); - - if (net_dim_on_top(dim)) { - net_dim_park_on_top(dim); - break; - } - - step_res = net_dim_step(dim); - switch (step_res) { - case NET_DIM_ON_EDGE: - net_dim_park_on_top(dim); - break; - case NET_DIM_TOO_TIRED: - net_dim_park_tired(dim); - break; - } - - break; - } - - if ((prev_state != NET_DIM_PARKING_ON_TOP) || - (dim->tune_state != NET_DIM_PARKING_ON_TOP)) - dim->prev_stats = *curr_stats; - - return dim->profile_ix != prev_ix; -} - -static inline void net_dim_sample(u16 event_ctr, - u64 packets, - u64 bytes, - struct net_dim_sample *s) -{ - s->time = ktime_get(); - s->pkt_ctr = packets; - s->byte_ctr = bytes; - s->event_ctr = event_ctr; -} - -#define NET_DIM_NEVENTS 64 -#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) - -static inline void net_dim_calc_stats(struct net_dim_sample *start, - struct net_dim_sample *end, - struct net_dim_stats *curr_stats) -{ - /* u32 holds up to 71 minutes, should be enough */ - u32 delta_us = ktime_us_delta(end->time, start->time); - u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); - u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, - start->byte_ctr); - - if (!delta_us) - return; - - curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); - curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); - curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, - delta_us); -} - -static inline void net_dim(struct net_dim *dim, - struct net_dim_sample end_sample) -{ - struct net_dim_stats curr_stats; - u16 nevents; - - switch (dim->state) { - case NET_DIM_MEASURE_IN_PROGRESS: - nevents = BIT_GAP(BITS_PER_TYPE(u16), - end_sample.event_ctr, - dim->start_sample.event_ctr); - if (nevents < NET_DIM_NEVENTS) - break; - net_dim_calc_stats(&dim->start_sample, &end_sample, - &curr_stats); - if (net_dim_decision(&curr_stats, dim)) { - dim->state = NET_DIM_APPLY_NEW_PROFILE; - schedule_work(&dim->work); - break; - } - /* fall through */ - case NET_DIM_START_MEASURE: - net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, - &dim->start_sample); - dim->state = NET_DIM_MEASURE_IN_PROGRESS; - break; - case NET_DIM_APPLY_NEW_PROFILE: - break; - } -} - -#endif /* NET_DIM_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eeacebd7debb..88292953aa6f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4870,4 +4870,6 @@ do { \ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) +extern struct net_device *blackhole_netdev; + #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 996bc247ef6e..049aeb40fa35 100644 --- a/include/linux/netfilter.h +++ 
b/include/linux/netfilter.h @@ -336,11 +336,6 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); #endif -/* Call this before modifying an existing packet: ensures it is - modifiable and linear to the point you care about (writable_len). - Returns true or false. */ -int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); - struct flowi; struct nf_queue_entry; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f5e03809cdb2..12ad9b1853b4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -2,7 +2,7 @@ /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> - * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ #ifndef _IP_SET_H #define _IP_SET_H diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h index 5477492c8374..3400958c07be 100644 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -2,8 +2,7 @@ #ifndef _IP_SET_COUNTER_H #define _IP_SET_COUNTER_H -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */ #ifdef __KERNEL__ diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h index aae081e085c6..3a2df02dbd55 100644 --- a/include/linux/netfilter/ipset/ip_set_skbinfo.h +++ b/include/linux/netfilter/ipset/ip_set_skbinfo.h @@ -2,8 +2,7 @@ #ifndef _IP_SET_SKBINFO_H #define _IP_SET_SKBINFO_H -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ +/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */ #ifdef __KERNEL__ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 88926b4c75f0..2be60e379ecf 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -2,8 +2,7 @@ #ifndef _IP_SET_TIMEOUT_H #define _IP_SET_TIMEOUT_H -/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ +/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ #ifdef __KERNEL__ diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index 91d6275292a5..19df78341fb3 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /**************************************************************************** - * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 - * conntrack/NAT module. + * BER and PER decoding library for H.323 conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> * diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 12113e502656..7beb681e1ce5 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -8,6 +8,7 @@ #define __LINUX_IP6_NETFILTER_H #include <uapi/linux/netfilter_ipv6.h> +#include <net/tcp.h> /* Extra routing may needed on local out, as the QUEUE target never returns * control to the table. 
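The skb_make_writable() declaration removed from netfilter.h above goes along with its in-tree callers being converted to skb_ensure_writable() from <linux/skbuff.h>; note that the return convention flips from "nonzero on success" to "0 on success". A former call site of the shape

	if (!skb_make_writable(skb, writable_len))
		return NF_DROP;

would now read roughly

	if (skb_ensure_writable(skb, writable_len))
		return NF_DROP;

assuming the surrounding netfilter hook still drops the packet on failure.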
@@ -19,6 +20,7 @@ struct ip6_rt_info { }; struct nf_queue_entry; +struct nf_ct_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even @@ -34,11 +36,24 @@ struct nf_ipv6_ops { struct in6_addr *saddr); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); + u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, + const struct tcphdr *th, u16 *mssp); + int (*cookie_v6_check)(const struct ipv6hdr *iph, + const struct tcphdr *th, __u32 cookie); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); +#if IS_MODULE(CONFIG_IPV6) + int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); + int (*br_fragment)(struct net *net, struct sock *sk, + struct sk_buff *skb, + struct nf_ct_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_ct_bridge_frag_data *data, + struct sk_buff *)); +#endif }; #ifdef CONFIG_NETFILTER @@ -60,8 +75,10 @@ static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, return 1; return v6_ops->chk_addr(net, addr, dev, strict); -#else +#elif IS_BUILTIN(CONFIG_IPV6) return ipv6_chk_addr(net, addr, dev, strict); +#else + return 1; #endif } @@ -86,6 +103,52 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #endif } +#include <net/netfilter/ipv6/nf_defrag_ipv6.h> + +static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, + u32 user) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (!v6_ops) + return 1; + + return v6_ops->br_defrag(net, skb, user); +#elif IS_BUILTIN(CONFIG_IPV6) + return nf_ct_frag6_gather(net, skb, user); +#else + return 1; +#endif +} + +int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + struct nf_ct_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_ct_bridge_frag_data *data, + struct sk_buff *)); + +static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, + struct sk_buff *skb, + struct nf_ct_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_ct_bridge_frag_data *data, + struct sk_buff *)) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (!v6_ops) + return 1; + + return v6_ops->br_fragment(net, sk, skb, data, output); +#elif IS_BUILTIN(CONFIG_IPV6) + return br_ip6_fragment(net, sk, skb, data, output); +#else + return 1; +#endif +} + int ip6_route_me_harder(struct net *net, struct sk_buff *skb); static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) @@ -97,9 +160,44 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) return -EHOSTUNREACH; return v6_ops->route_me_harder(net, skb); -#else +#elif IS_BUILTIN(CONFIG_IPV6) return ip6_route_me_harder(net, skb); +#else + return -EHOSTUNREACH; +#endif +} + +static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, + const struct tcphdr *th, + u16 *mssp) +{ +#if IS_ENABLED(CONFIG_SYN_COOKIES) +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (v6_ops) + return v6_ops->cookie_init_sequence(iph, th, mssp); +#elif IS_BUILTIN(CONFIG_IPV6) + return __cookie_v6_init_sequence(iph, th, mssp); +#endif +#endif + return 0; +} + +static inline int 
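The nf_ipv6_ops additions above follow a recurring shape: when CONFIG_IPV6 is built in, the helper is called directly; when IPv6 is a module, the call goes through the runtime-registered ops table and falls back to a safe default if the module is absent; when IPv6 is compiled out, only the default remains. A standalone sketch of that dispatch pattern, with FEATURE_BUILTIN/FEATURE_MODULE standing in for IS_BUILTIN()/IS_MODULE() (all names here are illustrative, not kernel API):

#include <stdio.h>

#define FEATURE_BUILTIN	1	/* flip these to exercise the other branches */
#define FEATURE_MODULE	0

struct feature_ops {
	int (*do_work)(int arg);
};

static const struct feature_ops *feature_ops;	/* registered at module load */

static int feature_do_work(int arg)	/* stands in for ipv6_chk_addr() etc. */
{
	return arg * 2;
}

static int call_do_work(int arg)
{
#if FEATURE_MODULE
	if (!feature_ops)
		return 1;		/* module not loaded: harmless default */
	return feature_ops->do_work(arg);
#elif FEATURE_BUILTIN
	return feature_do_work(arg);	/* direct call, no indirection */
#else
	return 1;			/* feature compiled out entirely */
#endif
}

int main(void)
{
	printf("%d\n", call_do_work(21));
	return 0;
}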
nf_cookie_v6_check(const struct ipv6hdr *iph, + const struct tcphdr *th, __u32 cookie) +{ +#if IS_ENABLED(CONFIG_SYN_COOKIES) +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (v6_ops) + return v6_ops->cookie_v6_check(iph, th, cookie); +#elif IS_BUILTIN(CONFIG_IPV6) + return __cookie_v6_check(iph, th, cookie); +#endif #endif + return 0; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 593d1b9c33a8..205fa7b1f07a 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -192,7 +192,14 @@ struct netlink_callback { bool strict_check; u16 answer_flags; unsigned int prev_seq, seq; - long args[6]; + union { + u8 ctx[48]; + + /* args is deprecated. Cast a struct over ctx instead + * for proper type safety. + */ + long args[6]; + }; }; struct netlink_notify { diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 22494d170619..fd59904a282c 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -660,6 +660,7 @@ enum pnfs_update_layout_reason { PNFS_UPDATE_LAYOUT_BLOCKED, PNFS_UPDATE_LAYOUT_INVALID_OPEN, PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, + PNFS_UPDATE_LAYOUT_EXIT, }; #define NFS4_OP_MAP_NUM_LONGS \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index d363d5765cdf..0a11712a80e3 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -223,6 +223,8 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ #define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ +#define NFS_INO_DATA_INVAL_DEFER \ + BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 1e78032a174b..a87fe854f008 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -58,6 +58,7 @@ struct nfs_client { struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */ u32 cl_minorversion;/* NFSv4 minorversion */ + unsigned int cl_nconnect; /* Number of connections */ const char * cl_principal; /* used for machine cred */ #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/include/linux/node.h b/include/linux/node.h index 1a557c589ecb..4866f32a02d8 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -137,10 +137,7 @@ static inline int register_one_node(int nid) extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); -extern int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg); -extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, - unsigned long phys_index); +extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); extern int register_memory_node_under_compute_node(unsigned int mem_nid, unsigned int cpu_nid, @@ -171,15 +168,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } -static inline int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg) +static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk) { - return 0; -} -static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, - unsigned long phys_index) -{ - return 0; } static inline void 
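The netlink_callback change above deprecates poking values into the raw args[6] array in favour of overlaying a properly typed struct on the 48-byte ctx buffer. A standalone sketch of that "struct over a context buffer" pattern, including the compile-time size check that keeps it safe (cb_stub and dump_ctx are illustrative stand-ins, not the kernel structures):

#include <stdio.h>

struct cb_stub {			/* stands in for struct netlink_callback */
	union {
		unsigned char ctx[48];
		long args[6];		/* kept only for alignment, as in the kernel */
	};
};

struct dump_ctx {			/* per-dump cursor carried across calls */
	unsigned long last_index;
	unsigned int table;
};

static struct dump_ctx *dump_ctx(struct cb_stub *cb)
{
	_Static_assert(sizeof(struct dump_ctx) <= sizeof(cb->ctx),
		       "dump state must fit in cb->ctx");
	return (struct dump_ctx *)cb->ctx;
}

int main(void)
{
	struct cb_stub cb = { .args = { 0 } };

	dump_ctx(&cb)->last_index = 42;	/* state survives between dump calls */
	printf("resume at %lu\n", dump_ctx(&cb)->last_index);
	return 0;
}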
register_hugetlbfs_with_node(node_registration_func_t reg, diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 56a92e3ae3ae..8c13538aeffe 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -58,9 +58,11 @@ #include <linux/completion.h> #include <linux/device.h> +#include <linux/interrupt.h> struct ntb_client; struct ntb_dev; +struct ntb_msi; struct pci_dev; /** @@ -205,7 +207,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) } /** - * struct ntb_ctx_ops - ntb device operations + * struct ntb_dev_ops - ntb device operations * @port_number: See ntb_port_number(). * @peer_port_count: See ntb_peer_port_count(). * @peer_port_number: See ntb_peer_port_number(). @@ -404,7 +406,7 @@ struct ntb_client { #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) /** - * struct ntb_device - ntb device + * struct ntb_dev - ntb device * @dev: Linux device object. * @pdev: PCI device entry of the ntb. * @topo: Detected topology of the ntb. @@ -426,6 +428,10 @@ struct ntb_dev { spinlock_t ctx_lock; /* block unregister until device is fully released */ struct completion released; + +#ifdef CONFIG_NTB_MSI + struct ntb_msi *msi; +#endif }; #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) @@ -616,7 +622,6 @@ static inline int ntb_port_number(struct ntb_dev *ntb) return ntb->ops->port_number(ntb); } - /** * ntb_peer_port_count() - get the number of peer device ports * @ntb: NTB device context. @@ -654,6 +659,58 @@ static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx) } /** + * ntb_logical_port_number() - get the logical port number of the local port + * @ntb: NTB device context. + * + * The Logical Port Number is defined to be a unique number for each + * port starting from zero through to the number of ports minus one. + * This is in contrast to the Port Number where each port can be assigned + * any unique physical number by the hardware. + * + * The logical port number is useful for calculating the resource indexes + * used by peers. + * + * Return: the logical port number or negative value indicating an error + */ +static inline int ntb_logical_port_number(struct ntb_dev *ntb) +{ + int lport = ntb_port_number(ntb); + int pidx; + + if (lport < 0) + return lport; + + for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) + if (lport <= ntb_peer_port_number(ntb, pidx)) + return pidx; + + return pidx; +} + +/** + * ntb_peer_logical_port_number() - get the logical peer port by given index + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * The Logical Port Number is defined to be a unique number for each + * port starting from zero through to the number of ports minus one. + * This is in contrast to the Port Number where each port can be assigned + * any unique physical number by the hardware. + * + * The logical port number is useful for calculating the resource indexes + * used by peers. + * + * Return: the peer's logical port number or negative value indicating an error + */ +static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx) +{ + if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb)) + return pidx; + else + return pidx + 1; +} + +/** * ntb_peer_port_idx() - get the peer device port index by given port number * @ntb: NTB device context. * @port: Peer port number. 
@@ -1506,4 +1563,141 @@ static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); } +/** + * ntb_peer_resource_idx() - get a resource index for a given peer idx + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * When constructing a graph of peers, each remote peer must use a different + * resource index (mw, doorbell, etc) to communicate with each other + * peer. + * + * In a two peer system, this function should always return 0 such that + * resource 0 points to the remote peer on both ports. + * + * In a 5 peer system, this function will return the following matrix + * + * pidx \ port 0 1 2 3 4 + * 0 0 0 1 2 3 + * 1 0 1 1 2 3 + * 2 0 1 2 2 3 + * 3 0 1 2 3 3 + * + * For example, if this function is used to program peer's memory + * windows, port 0 will program MW 0 on all it's peers to point to itself. + * port 1 will program MW 0 in port 0 to point to itself and MW 1 on all + * other ports. etc. + * + * For the legacy two host case, ntb_port_number() and ntb_peer_port_number() + * both return zero and therefore this function will always return zero. + * So MW 0 on each host would be programmed to point to the other host. + * + * Return: the resource index to use for that peer. + */ +static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx) +{ + int local_port, peer_port; + + if (pidx >= ntb_peer_port_count(ntb)) + return -EINVAL; + + local_port = ntb_logical_port_number(ntb); + peer_port = ntb_peer_logical_port_number(ntb, pidx); + + if (peer_port < local_port) + return local_port - 1; + else + return local_port; +} + +/** + * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx + * using the highest index memory windows first + * + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * Like ntb_peer_resource_idx(), except it returns indexes starting with + * last memory window index. + * + * Return: the resource index to use for that peer. 
+ */ +static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx) +{ + int ret; + + ret = ntb_peer_resource_idx(ntb, pidx); + if (ret < 0) + return ret; + + return ntb_mw_count(ntb, pidx) - ret - 1; +} + +struct ntb_msi_desc { + u32 addr_offset; + u32 data; +}; + +#ifdef CONFIG_NTB_MSI + +int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx)); +int ntb_msi_setup_mws(struct ntb_dev *ntb); +void ntb_msi_clear_mws(struct ntb_dev *ntb); +int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc); +void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id); +int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc); +int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc, + phys_addr_t *msi_addr); + +#else /* not CONFIG_NTB_MSI */ + +static inline int ntb_msi_init(struct ntb_dev *ntb, + void (*desc_changed)(void *ctx)) +{ + return -EOPNOTSUPP; +} +static inline int ntb_msi_setup_mws(struct ntb_dev *ntb) +{ + return -EOPNOTSUPP; +} +static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {} +static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, + irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc) +{ + return -EOPNOTSUPP; +} +static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, + void *dev_id) {} +static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc) +{ + return -EOPNOTSUPP; +} +static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc, + phys_addr_t *msi_addr) +{ + return -EOPNOTSUPP; + +} + +#endif /* CONFIG_NTB_MSI */ + +static inline int ntbm_msi_request_irq(struct ntb_dev *ntb, + irq_handler_t handler, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc) +{ + return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name, + dev_id, msi_desc); +} + #endif diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index c48e96436f56..98d904961b33 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -791,6 +791,11 @@ struct nvmet_fc_target_port { * nvmefc_tgt_fcp_req. * Entrypoint is Optional. * + * @discovery_event: Called by the transport to generate an RSCN + * change notifications to NVME initiators. The RSCN notifications + * should cause the initiator to rescan the discovery controller + * on the targetport. + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. 
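The matrix in the ntb_peer_resource_idx() kernel-doc above can be reproduced with a few lines of plain C, which makes the logical-port arithmetic easier to follow. This standalone sketch assumes five ports with physical numbers 0..4 (so logical and physical port numbers coincide) and prints the same table:

#include <stdio.h>

#define NPORTS 5

static int peer_port_number(int local, int pidx)
{
	return pidx < local ? pidx : pidx + 1;	/* peers ascend, skipping ourselves */
}

static int peer_resource_idx(int local, int pidx)
{
	int local_log = local;			/* logical == physical in this example */
	int peer_log = peer_port_number(local, pidx);

	return peer_log < local_log ? local_log - 1 : local_log;
}

int main(void)
{
	int pidx, port;

	printf("pidx \\ port   0  1  2  3  4\n");
	for (pidx = 0; pidx < NPORTS - 1; pidx++) {
		printf("   %d        ", pidx);
		for (port = 0; port < NPORTS; port++)
			printf("  %d", peer_resource_idx(port, pidx));
		printf("\n");
	}
	return 0;
}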
@@ -832,6 +837,7 @@ struct nvmet_fc_target_template { struct nvmefc_tgt_fcp_req *fcpreq); void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); + void (*discovery_event)(struct nvmet_fc_target_port *tgtport); u32 max_hw_queues; u16 max_sgl_segments; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 8028adacaff3..01aa6a6c241d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -315,7 +315,7 @@ struct nvme_id_ns { __u8 nmic; __u8 rescap; __u8 fpi; - __u8 rsvd33; + __u8 dlfeat; __le16 nawun; __le16 nawupf; __le16 nacwu; @@ -324,11 +324,17 @@ struct nvme_id_ns { __le16 nabspf; __le16 noiob; __u8 nvmcap[16]; - __u8 rsvd64[28]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; __le32 anagrpid; __u8 rsvd96[3]; __u8 nsattr; - __u8 rsvd100[4]; + __le16 nvmsetid; + __le16 endgid; __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[16]; @@ -562,6 +568,22 @@ enum nvme_opcode { nvme_cmd_resv_release = 0x15, }; +#define nvme_opcode_name(opcode) { opcode, #opcode } +#define show_nvm_opcode_name(val) \ + __print_symbolic(val, \ + nvme_opcode_name(nvme_cmd_flush), \ + nvme_opcode_name(nvme_cmd_write), \ + nvme_opcode_name(nvme_cmd_read), \ + nvme_opcode_name(nvme_cmd_write_uncor), \ + nvme_opcode_name(nvme_cmd_compare), \ + nvme_opcode_name(nvme_cmd_write_zeroes), \ + nvme_opcode_name(nvme_cmd_dsm), \ + nvme_opcode_name(nvme_cmd_resv_register), \ + nvme_opcode_name(nvme_cmd_resv_report), \ + nvme_opcode_name(nvme_cmd_resv_acquire), \ + nvme_opcode_name(nvme_cmd_resv_release)) + + /* * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier * @@ -794,6 +816,32 @@ enum nvme_admin_opcode { nvme_admin_sanitize_nvm = 0x84, }; +#define nvme_admin_opcode_name(opcode) { opcode, #opcode } +#define show_admin_opcode_name(val) \ + __print_symbolic(val, \ + nvme_admin_opcode_name(nvme_admin_delete_sq), \ + nvme_admin_opcode_name(nvme_admin_create_sq), \ + nvme_admin_opcode_name(nvme_admin_get_log_page), \ + nvme_admin_opcode_name(nvme_admin_delete_cq), \ + nvme_admin_opcode_name(nvme_admin_create_cq), \ + nvme_admin_opcode_name(nvme_admin_identify), \ + nvme_admin_opcode_name(nvme_admin_abort_cmd), \ + nvme_admin_opcode_name(nvme_admin_set_features), \ + nvme_admin_opcode_name(nvme_admin_get_features), \ + nvme_admin_opcode_name(nvme_admin_async_event), \ + nvme_admin_opcode_name(nvme_admin_ns_mgmt), \ + nvme_admin_opcode_name(nvme_admin_activate_fw), \ + nvme_admin_opcode_name(nvme_admin_download_fw), \ + nvme_admin_opcode_name(nvme_admin_ns_attach), \ + nvme_admin_opcode_name(nvme_admin_keep_alive), \ + nvme_admin_opcode_name(nvme_admin_directive_send), \ + nvme_admin_opcode_name(nvme_admin_directive_recv), \ + nvme_admin_opcode_name(nvme_admin_dbbuf), \ + nvme_admin_opcode_name(nvme_admin_format_nvm), \ + nvme_admin_opcode_name(nvme_admin_security_send), \ + nvme_admin_opcode_name(nvme_admin_security_recv), \ + nvme_admin_opcode_name(nvme_admin_sanitize_nvm)) + enum { NVME_QUEUE_PHYS_CONTIG = (1 << 0), NVME_CQ_IRQ_ENABLED = (1 << 1), @@ -1008,6 +1056,23 @@ enum nvmf_capsule_command { nvme_fabrics_type_property_get = 0x04, }; +#define nvme_fabrics_type_name(type) { type, #type } +#define show_fabrics_type_name(type) \ + __print_symbolic(type, \ + nvme_fabrics_type_name(nvme_fabrics_type_property_set), \ + nvme_fabrics_type_name(nvme_fabrics_type_connect), \ + nvme_fabrics_type_name(nvme_fabrics_type_property_get)) + +/* + * If not fabrics command, fctype will be ignored. 
+ */ +#define show_opcode_name(qid, opcode, fctype) \ + ((opcode) == nvme_fabrics_command ? \ + show_fabrics_type_name(fctype) : \ + ((qid) ? \ + show_nvm_opcode_name(opcode) : \ + show_admin_opcode_name(opcode))) + struct nvmf_common_command { __u8 opcode; __u8 resv1; @@ -1165,6 +1230,11 @@ struct nvme_command { }; }; +static inline bool nvme_is_fabrics(struct nvme_command *cmd) +{ + return cmd->common.opcode == nvme_fabrics_command; +} + struct nvme_error_slot { __le64 error_count; __le16 sqid; @@ -1186,7 +1256,7 @@ static inline bool nvme_is_write(struct nvme_command *cmd) * * Why can't we simply have a Fabrics In and Fabrics out command? */ - if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + if (unlikely(nvme_is_fabrics(cmd))) return cmd->fabrics.fctype & 1; return cmd->common.opcode & 1; } diff --git a/include/linux/of.h b/include/linux/of.h index 0cf857012f11..844f89e1b039 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np, } /** - * of_property_read_bool - Findfrom a property + * of_property_read_bool - Find a property * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index a713e5d156d8..acf820e88952 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -23,15 +23,6 @@ struct device_node; /* For scanning an arbitrary device-tree at any time */ -extern char *of_fdt_get_string(const void *blob, u32 offset); -extern void *of_fdt_get_property(const void *blob, - unsigned long node, - const char *name, - int *size); -extern bool of_fdt_is_big_endian(const void *blob, - unsigned long node); -extern int of_fdt_match(const void *blob, unsigned long node, - const char *const *compat); extern void *of_fdt_unflatten_tree(const unsigned long *blob, struct device_node *dad, struct device_node **mynodes); @@ -64,9 +55,7 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node, extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); -extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); -extern int of_get_flat_dt_size(void); extern uint32_t of_get_flat_dt_phandle(unsigned long node); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h index 79bdc6328c52..c4602364e909 100644 --- a/include/linux/olpc-ec.h +++ b/include/linux/olpc-ec.h @@ -2,6 +2,8 @@ #ifndef _LINUX_OLPC_EC_H #define _LINUX_OLPC_EC_H +#include <linux/bits.h> + /* XO-1 EC commands */ #define EC_FIRMWARE_REV 0x08 #define EC_WRITE_SCI_MASK 0x1b @@ -16,28 +18,57 @@ #define EC_SCI_QUERY 0x84 #define EC_EXT_SCI_QUERY 0x85 +/* SCI source values */ +#define EC_SCI_SRC_GAME BIT(0) +#define EC_SCI_SRC_BATTERY BIT(1) +#define EC_SCI_SRC_BATSOC BIT(2) +#define EC_SCI_SRC_BATERR BIT(3) +#define EC_SCI_SRC_EBOOK BIT(4) /* XO-1 only */ +#define EC_SCI_SRC_WLAN BIT(5) /* XO-1 only */ +#define EC_SCI_SRC_ACPWR BIT(6) +#define EC_SCI_SRC_BATCRIT BIT(7) +#define EC_SCI_SRC_GPWAKE BIT(8) /* XO-1.5 only */ +#define EC_SCI_SRC_ALL GENMASK(8, 0) + struct platform_device; struct olpc_ec_driver { - int (*probe)(struct platform_device *); int (*suspend)(struct platform_device *); int (*resume)(struct platform_device *); int (*ec_cmd)(u8, u8 
*, size_t, u8 *, size_t, void *); + + bool wakeup_available; }; -#ifdef CONFIG_OLPC +#ifdef CONFIG_OLPC_EC extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen); +extern void olpc_ec_wakeup_set(u16 value); +extern void olpc_ec_wakeup_clear(u16 value); + +extern int olpc_ec_mask_write(u16 bits); +extern int olpc_ec_sci_query(u16 *sci_value); + +extern bool olpc_ec_wakeup_available(void); + #else static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) { return -ENODEV; } -#endif /* CONFIG_OLPC */ +static inline void olpc_ec_wakeup_set(u16 value) { } +static inline void olpc_ec_wakeup_clear(u16 value) { } + +static inline bool olpc_ec_wakeup_available(void) +{ + return false; +} + +#endif /* CONFIG_OLPC_EC */ #endif /* _LINUX_OLPC_EC_H */ diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 6dbcd2da0332..8aa984ec1f38 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -6,7 +6,9 @@ #ifndef OMAP_MAILBOX_H #define OMAP_MAILBOX_H -typedef u32 mbox_msg_t; +typedef uintptr_t mbox_msg_t; + +#define omap_mbox_message(data) (u32)(mbox_msg_t)(data) typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) diff --git a/include/linux/oom.h b/include/linux/oom.h index d07992009265..c696c265f019 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -108,7 +108,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) bool __oom_reap_task_mm(struct mm_struct *mm); extern unsigned long oom_badness(struct task_struct *p, - struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 1dda31825ec4..71283739ffd2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -32,6 +32,7 @@ #endif /* CONFIG_SPARSEMEM */ +#ifndef BUILD_VDSO32_64 /* * page->flags layout: * @@ -76,20 +77,22 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ + <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_TAG_WIDTH 8 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ > BITS_PER_LONG - NR_PAGEFLAGS -#error "KASAN: not enough bits in page flags for tag" -#endif -#else -#define KASAN_TAG_WIDTH 0 +#error "Not enough bits in page flags" #endif /* @@ -104,4 +107,5 @@ #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif +#endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9f8712a4b1a5..f91cb8898ff0 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -152,6 +152,8 @@ enum pageflags { PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, + /* Remapped by swiotlb-xen. 
*/ + PG_xen_remapped = PG_owner_priv_1, /* SLOB */ PG_slob_free = PG_private, @@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); +PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) + TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) @@ -703,6 +707,7 @@ PAGEFLAG_FALSE(DoubleMap) #define PG_offline 0x00000100 #define PG_kmemcg 0x00000200 #define PG_table 0x00000400 +#define PG_guard 0x00000800 #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) @@ -754,6 +759,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg) */ PAGE_TYPE_OPS(Table, table) +/* + * Marks guardpages used with debug_pagealloc. + */ +PAGE_TYPE_OPS(Guard, guard) + extern bool is_free_buddy_page(struct page *page); __PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 280ae96dc4c3..1099c2fee20f 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -50,7 +50,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. * target range is [start_pfn, end_pfn) */ -int +void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, unsigned migratetype); diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index f84f167ec04c..09592951725c 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -17,7 +17,6 @@ struct page_ext_operations { #ifdef CONFIG_PAGE_EXTENSION enum page_ext_flags { - PAGE_EXT_DEBUG_GUARD, PAGE_EXT_OWNER, #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index fe0b29bf2df7..c7552459a15f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -383,8 +383,7 @@ extern int read_cache_pages(struct address_space *mapping, static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, void *data) { - filler_t *filler = (filler_t *)mapping->a_ops->readpage; - return read_cache_page(mapping, index, filler, data); + return read_cache_page(mapping, index, NULL, data); } /* @@ -452,6 +451,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); +/* + * Return true if the page was successfully locked + */ static inline int trylock_page(struct page *page) { page = compound_head(page); diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 8082b612f561..62b7fdcc661c 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -107,9 +107,10 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { } #endif extern const guid_t pci_acpi_dsm_guid; -#define DEVICE_LABEL_DSM 0x07 -#define RESET_DELAY_DSM 0x08 -#define FUNCTION_DELAY_DSM 0x09 +#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05 +#define DEVICE_LABEL_DSM 0x07 +#define RESET_DELAY_DSM 0x08 +#define FUNCTION_DELAY_DSM 0x09 #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index df28af5cef21..67064145d76e 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -24,11 +24,12 @@ #define PCIE_LINK_STATE_CLKPM 4 #ifdef CONFIG_PCIEASPM 
-void pci_disable_link_state(struct pci_dev *pdev, int state); -void pci_disable_link_state_locked(struct pci_dev *pdev, int state); +int pci_disable_link_state(struct pci_dev *pdev, int state); +int pci_disable_link_state_locked(struct pci_dev *pdev, int state); void pcie_no_aspm(void); #else -static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } +static inline int pci_disable_link_state(struct pci_dev *pdev, int state) +{ return 0; } static inline void pcie_no_aspm(void) { } #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index dd436da7eccc..9e700d9f9f28 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -151,6 +151,8 @@ static inline const char *pci_power_name(pci_power_t state) #define PCI_PM_BUS_WAIT 50 /** + * typedef pci_channel_state_t + * * The pci_channel state describes connectivity between the CPU and * the PCI device. If some PCI bus between here and the PCI device * has crashed or locked up, this info is reflected here. @@ -258,6 +260,7 @@ enum pci_bus_speed { PCIE_SPEED_5_0GT = 0x15, PCIE_SPEED_8_0GT = 0x16, PCIE_SPEED_16_0GT = 0x17, + PCIE_SPEED_32_0GT = 0x18, PCI_SPEED_UNKNOWN = 0xff, }; @@ -383,7 +386,7 @@ struct pci_dev { unsigned int is_busmaster:1; /* Is busmaster */ unsigned int no_msi:1; /* May not use MSI */ - unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ + unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* Config space access blocked */ unsigned int broken_parity_status:1; /* Generates false positive parity */ unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ @@ -506,6 +509,8 @@ struct pci_host_bridge { unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ unsigned int native_pme:1; /* OS may use PCIe PME */ unsigned int native_ltr:1; /* OS may use PCIe LTR */ + unsigned int preserve_config:1; /* Preserve FW resource setup */ + /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, const struct resource *res, @@ -776,6 +781,50 @@ struct pci_error_handlers { struct module; + +/** + * struct pci_driver - PCI driver structure + * @node: List of driver structures. + * @name: Driver name. + * @id_table: Pointer to table of device IDs the driver is + * interested in. Most drivers should export this + * table using MODULE_DEVICE_TABLE(pci,...). + * @probe: This probing function gets called (during execution + * of pci_register_driver() for already existing + * devices or later if a new device gets inserted) for + * all PCI devices which match the ID table and are not + * "owned" by the other drivers yet. This function gets + * passed a "struct pci_dev \*" for each device whose + * entry in the ID table matches the device. The probe + * function returns zero when the driver chooses to + * take "ownership" of the device or an error code + * (negative number) otherwise. + * The probe function always gets called from process + * context, so it can sleep. + * @remove: The remove() function gets called whenever a device + * being handled by this driver is removed (either during + * deregistration of the driver or when it's manually + * pulled out of a hot-pluggable slot). + * The remove function always gets called from process + * context, so it can sleep. + * @suspend: Put device into low power state. + * @suspend_late: Put device into low power state. + * @resume_early: Wake device from low power state. + * @resume: Wake device from low power state. 
+ * (Please see Documentation/power/pci.rst for descriptions + * of PCI Power Management and the related functions.) + * @shutdown: Hook into reboot_notifier_list (kernel/sys.c). + * Intended to stop any idling DMA operations. + * Useful for enabling wake-on-lan (NIC) or changing + * the power state of a device before reboot. + * e.g. drivers/net/e100.c. + * @sriov_configure: Optional driver callback to allow configuration of + * number of VFs to enable via sysfs "sriov_numvfs" file. + * @err_handler: See Documentation/PCI/pci-error-recovery.rst + * @groups: Sysfs attribute groups. + * @driver: Driver model structure. + * @dynids: List of dynamically added device IDs. + */ struct pci_driver { struct list_head node; const char *name; @@ -1363,6 +1412,15 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ + +/* + * Virtual interrupts allow for more interrupts to be allocated + * than the device has interrupts for. These are not programmed + * into the device's MSI-X table and must be handled by some + * other driver means. + */ +#define PCI_IRQ_VIRTUAL (1 << 4) + #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) @@ -2207,7 +2265,7 @@ static inline u8 pci_vpd_srdt_tag(const u8 *srdt) /** * pci_vpd_info_field_size - Extracts the information field length - * @lrdt: Pointer to the beginning of an information field header + * @info_field: Pointer to the beginning of an information field header * * Returns the extracted information field length. */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 70e86148cb1e..c842735a4f45 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1112,7 +1112,7 @@ #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1533 0x1533 -#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 #define PCI_DEVICE_ID_AL_M1563 0x1563 #define PCI_DEVICE_ID_AL_M1621 0x1621 @@ -1336,6 +1336,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 @@ -1752,7 +1753,7 @@ #define PCI_VENDOR_ID_STALLION 0x124d /* Allied Telesyn */ -#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_VENDOR_ID_AT 0x1259 #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 @@ -2366,6 +2367,7 @@ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf +#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda #define PCI_VENDOR_ID_USR 0x16ec @@ -2550,7 +2552,7 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff -#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index b297cd1cd4f1..7aef0abc194a 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -75,14 +75,21 @@ enum { * operation using percpu_ref_switch_to_percpu(). 
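The PCI_IRQ_VIRTUAL flag above lets a driver allocate more interrupt vectors than the device's MSI-X table actually provides; the surplus "virtual" vectors are never written to the device and must be raised by some other mechanism (the NTB MSI helpers added above look like one intended user). A hedged fragment showing the intended request, assuming a hypothetical device with two hardware MSI-X entries plus two software-raised vectors:

	/* Illustrative fragment only; pdev comes from the driver's probe(). */
	int nvecs = pci_alloc_irq_vectors(pdev, 2, 4,
					  PCI_IRQ_MSIX | PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;
	/* Vectors 2 and 3 are virtual: not programmed into the MSI-X table,
	 * so the driver must trigger their handlers by other means. */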
If initialized * with this flag, the ref will stay in atomic mode until * percpu_ref_switch_to_percpu() is invoked on it. + * Implies ALLOW_REINIT. */ PERCPU_REF_INIT_ATOMIC = 1 << 0, /* * Start dead w/ ref == 0 in atomic mode. Must be revived with - * percpu_ref_reinit() before used. Implies INIT_ATOMIC. + * percpu_ref_reinit() before used. Implies INIT_ATOMIC and + * ALLOW_REINIT. */ PERCPU_REF_INIT_DEAD = 1 << 1, + + /* + * Allow switching from atomic mode to percpu mode. + */ + PERCPU_REF_ALLOW_REINIT = 1 << 2, }; struct percpu_ref { @@ -95,6 +102,7 @@ struct percpu_ref { percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic:1; + bool allow_reinit:1; struct rcu_head rcu; }; diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 03cb4b6f842e..3998cdf9cd14 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -17,14 +17,18 @@ struct percpu_rw_semaphore { int readers_block; }; -#define DEFINE_STATIC_PERCPU_RWSEM(name) \ +#define __DEFINE_PERCPU_RWSEM(name, is_static) \ static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ -static struct percpu_rw_semaphore name = { \ - .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ +is_static struct percpu_rw_semaphore name = { \ + .rss = __RCU_SYNC_INITIALIZER(name.rss), \ .read_count = &__percpu_rwsem_rc_##name, \ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ .writer = __RCUWAIT_INITIALIZER(name.writer), \ } +#define DEFINE_PERCPU_RWSEM(name) \ + __DEFINE_PERCPU_RWSEM(name, /* not static */) +#define DEFINE_STATIC_PERCPU_RWSEM(name) \ + __DEFINE_PERCPU_RWSEM(name, static) extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); @@ -117,7 +121,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, lock_release(&sem->rw_sem.dep_map, 1, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) - sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN; + atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN); #endif } @@ -127,7 +131,7 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) - sem->rw_sem.owner = current; + atomic_long_set(&sem->rw_sem.owner, (long)current); #endif } diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index a9b0ee408fbd..71f525a35ac2 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -171,4 +171,6 @@ void armpmu_free_irq(int irq, int cpu); #endif /* CONFIG_ARM_PMU */ +#define ARMV8_SPE_PDEV_NAME "arm,spe-v1" + #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2bca72f3028b..e8ad3c590a23 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -256,6 +256,7 @@ struct pmu { struct module *module; struct device *dev; const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; const char *name; int type; @@ -749,6 +750,11 @@ struct perf_event_context { int nr_stat; int nr_freq; int rotate_disable; + /* + * Set when nr_events != nr_active, except tolerant to events not + * necessary to be active due to scheduling constraints, such as cgroups. 
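The new PERCPU_REF_ALLOW_REINIT flag above must now be passed at init time by any user that intends to kill a percpu_ref and later bring it back with percpu_ref_reinit(); the two older INIT flags imply it. A hedged fragment of the call sequence (my_ref and my_release are placeholders, and the surrounding driver context is assumed):

	/* Illustrative fragment only. */
	int ret = percpu_ref_init(&my_ref, my_release,
				  PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... later: stop new users and switch to atomic mode ... */
	percpu_ref_kill(&my_ref);

	/* ... once the release callback has run, the ref may be reused ... */
	percpu_ref_reinit(&my_ref);	/* only valid with ALLOW_REINIT set */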
+ */ + int rotate_necessary; refcount_t refcount; struct task_struct *task; @@ -1049,6 +1055,11 @@ static inline int in_software_context(struct perf_event *event) return event->ctx->pmu->task_ctx_nr == perf_sw_context; } +static inline int is_exclusive_pmu(struct pmu *pmu) +{ + return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; +} + extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 3c202a11a79e..2d9148221e9a 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -66,13 +66,6 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) return PFN_PHYS(pfn_t_to_pfn(pfn)); } -static inline void *pfn_t_to_virt(pfn_t pfn) -{ - if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn))) - return __va(pfn_t_to_phys(pfn)); - return NULL; -} - static inline pfn_t page_to_pfn_t(struct page *page) { return pfn_to_pfn_t(page_to_pfn(page)); @@ -104,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) #endif #endif -#ifdef __HAVE_ARCH_PTE_DEVMAP +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { const u64 flags = PFN_DEV|PFN_MAP; @@ -122,7 +115,7 @@ pmd_t pmd_mkdevmap(pmd_t pmd); defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pud_mkdevmap(pud_t pud); #endif -#endif /* __HAVE_ARCH_PTE_DEVMAP */ +#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static inline bool pfn_t_special(pfn_t pfn) diff --git a/include/linux/phy.h b/include/linux/phy.h index 6424586fe2d6..462b90b73f93 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -55,6 +55,9 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) +extern const int phy_basic_ports_array[3]; +extern const int phy_fibre_port_array[1]; +extern const int phy_all_ports_features_array[7]; extern const int phy_10_100_features_array[4]; extern const int phy_basic_t1_features_array[2]; extern const int phy_gbit_features_array[2]; @@ -98,6 +101,7 @@ typedef enum { PHY_INTERFACE_MODE_XAUI, /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GKR, + PHY_INTERFACE_MODE_USXGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -173,6 +177,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xaui"; case PHY_INTERFACE_MODE_10GKR: return "10gbase-kr"; + case PHY_INTERFACE_MODE_USXGMII: + return "usxgmii"; default: return "unknown"; } @@ -180,7 +186,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define PHY_INIT_TIMEOUT 100000 -#define PHY_STATE_TIME 1 #define PHY_FORCE_TIMEOUT 10 #define PHY_MAX_ADDR 32 @@ -193,6 +198,8 @@ static inline const char *phy_modes(phy_interface_t interface) /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. 
*/ #define MII_ADDR_C45 (1<<30) +#define MII_DEVADDR_C45_SHIFT 16 +#define MII_REGADDR_C45_MASK GENMASK(15, 0) struct device; struct phylink; @@ -290,12 +297,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * - irq or timer will set RUNNING if link comes back * - phy_stop moves to HALTED * - * FORCING: PHY is being configured with forced settings - * - if link is up, move to RUNNING - * - If link is down, we drop to the next highest setting, and - * retry (FORCING) after a timeout - * - phy_stop moves to HALTED - * * RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets * - irq or timer will set NOLINK if link goes down @@ -312,7 +313,6 @@ enum phy_state { PHY_UP, PHY_RUNNING, PHY_NOLINK, - PHY_FORCING, }; /** @@ -340,8 +340,6 @@ struct phy_c45_device_ids { * loopback_enabled: Set true if this phy has been loopbacked successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. - * link_timeout: The number of timer firings to wait before the - * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine * attached_dev: The attached enet driver's device instance ptr @@ -409,8 +407,6 @@ struct phy_device { /* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; - int link_timeout; - #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; @@ -529,6 +525,9 @@ struct phy_driver { */ int (*did_interrupt)(struct phy_device *phydev); + /* Override default interrupt handling */ + int (*handle_interrupt)(struct phy_device *phydev); + /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); @@ -1129,6 +1128,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); +void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); @@ -1139,6 +1139,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); void phy_request_interrupt(struct phy_device *phydev); +void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2d2e55dfea94..300ecdb6790a 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -54,6 +54,21 @@ struct phylink_link_state { unsigned int an_complete:1; }; +enum phylink_op_type { + PHYLINK_NETDEV = 0, + PHYLINK_DEV, +}; + +/** + * struct phylink_config - PHYLINK configuration structure + * @dev: a pointer to a struct device associated with the MAC + * @type: operation type of PHYLINK instance + */ +struct phylink_config { + struct device *dev; + enum phylink_op_type type; +}; + /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. 
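The new MII_DEVADDR_C45_SHIFT and MII_REGADDR_C45_MASK definitions above spell out how a clause-45 access packs the MMD (device) address and the 16-bit register number into a single regnum, with MII_ADDR_C45 marking it as a C45 transaction. A standalone round-trip sketch (plain C; GENMASK(15, 0) written out as 0xffff, and the MMD/register values are made up):

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45		(1u << 30)
#define MII_DEVADDR_C45_SHIFT	16
#define MII_REGADDR_C45_MASK	0xffffu		/* GENMASK(15, 0) */

static uint32_t mii_c45_addr(unsigned int devad, unsigned int regnum)
{
	return MII_ADDR_C45 | (devad << MII_DEVADDR_C45_SHIFT) |
	       (regnum & MII_REGADDR_C45_MASK);
}

int main(void)
{
	uint32_t addr = mii_c45_addr(7, 0x8008);	/* MMD 7 (AN), register 0x8008 */

	printf("encoded 0x%08x, devad %u, reg 0x%04x\n",
	       (unsigned int)addr,
	       (unsigned int)((addr >> MII_DEVADDR_C45_SHIFT) & 0x1f),
	       (unsigned int)(addr & MII_REGADDR_C45_MASK));
	return 0;
}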
@@ -66,16 +81,17 @@ struct phylink_link_state { * The individual methods are described more fully below. */ struct phylink_mac_ops { - void (*validate)(struct net_device *ndev, unsigned long *supported, + void (*validate)(struct phylink_config *config, + unsigned long *supported, struct phylink_link_state *state); - int (*mac_link_state)(struct net_device *ndev, + int (*mac_link_state)(struct phylink_config *config, struct phylink_link_state *state); - void (*mac_config)(struct net_device *ndev, unsigned int mode, + void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); - void (*mac_an_restart)(struct net_device *ndev); - void (*mac_link_down)(struct net_device *ndev, unsigned int mode, + void (*mac_an_restart)(struct phylink_config *config); + void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); - void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + void (*mac_link_up)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); }; @@ -83,7 +99,7 @@ struct phylink_mac_ops { #if 0 /* For kernel-doc purposes only. */ /** * validate - Validate and update the link configuration - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @supported: ethtool bitmask for supported link modes. * @state: a pointer to a &struct phylink_link_state. * @@ -93,19 +109,26 @@ struct phylink_mac_ops { * Note that the PHY may be able to transform from one connection * technology to another, so, eg, don't clear 1000BaseX just * because the MAC is unable to BaseX mode. This is more about - * clearing unsupported speeds and duplex settings. + * clearing unsupported speeds and duplex settings. The port modes + * should not be cleared; phylink_set_port_modes() will help with this. * * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode * based on @state->advertising and/or @state->speed and update - * @state->interface accordingly. + * @state->interface accordingly. See phylink_helper_basex_speed(). + * + * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the + * MAC driver to return all supported link modes. + * + * If the @state->interface mode is not supported, then the @supported + * mask must be cleared. */ -void validate(struct net_device *ndev, unsigned long *supported, +void validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); /** * mac_link_state() - Read the current link state from the hardware - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @state: a pointer to a &struct phylink_link_state. * * Read the current link state from the MAC, reporting the current @@ -114,12 +137,12 @@ void validate(struct net_device *ndev, unsigned long *supported, * negotiation completion state in @state->an_complete, and link * up state in @state->link. */ -int mac_link_state(struct net_device *ndev, +int mac_link_state(struct phylink_config *config, struct phylink_link_state *state); /** * mac_config() - configure the MAC for the selected mode and state - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @state: a pointer to a &struct phylink_link_state. 
* @@ -168,18 +191,18 @@ int mac_link_state(struct net_device *ndev, * down. This "update" behaviour is critical to avoid bouncing the * link up status. */ -void mac_config(struct net_device *ndev, unsigned int mode, +void mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); /** * mac_an_restart() - restart 802.3z BaseX autonegotiation - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. */ -void mac_an_restart(struct net_device *ndev); +void mac_an_restart(struct phylink_config *config); /** * mac_link_down() - take the link down - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @@ -188,12 +211,12 @@ void mac_an_restart(struct net_device *ndev); * Energy Efficient Ethernet MAC configuration. Interface type * selection must be done in mac_config(). */ -void mac_link_down(struct net_device *ndev, unsigned int mode, +void mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface); /** * mac_link_up() - allow the link to come up - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @phy: any attached phy @@ -204,13 +227,14 @@ void mac_link_down(struct net_device *ndev, unsigned int mode, * phy_init_eee() and perform appropriate MAC configuration for EEE. * Interface type selection must be done in mac_config(). */ -void mac_link_up(struct net_device *ndev, unsigned int mode, +void mac_link_up(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); #endif -struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, - phy_interface_t iface, const struct phylink_mac_ops *ops); +struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, + phy_interface_t iface, + const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); diff --git a/include/linux/pid.h b/include/linux/pid.h index 3c8ef5a199ca..2a83e434db9d 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -3,6 +3,8 @@ #define _LINUX_PID_H #include <linux/rculist.h> +#include <linux/wait.h> +#include <linux/refcount.h> enum pid_type { @@ -56,10 +58,12 @@ struct upid { struct pid { - atomic_t count; + refcount_t count; unsigned int level; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; + /* wait queue for pidfd notifications */ + wait_queue_head_t wait_pidfd; struct rcu_head rcu; struct upid numbers[1]; }; @@ -71,7 +75,7 @@ extern const struct file_operations pidfd_fops; static inline struct pid *get_pid(struct pid *pid) { if (pid) - atomic_inc(&pid->count); + refcount_inc(&pid->count); return pid; } diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 6f260c1d3467..6aeb711f7cd1 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -11,6 +11,12 @@ #ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H #define __LINUX_PINCTRL_PINCONF_GENERIC_H +#include <linux/device.h> +#include <linux/pinctrl/machine.h> + +struct pinctrl_dev; +struct pinctrl_map; + /** * enum pin_config_param - possible pin configuration parameters * @PIN_CONFIG_BIAS_BUS_HOLD: the 
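The phylink conversion above replaces the struct net_device argument in every MAC operation with a struct phylink_config that the driver embeds in its own private state, which also opens the door to non-netdev users (PHYLINK_DEV). A hedged sketch of the usual driver-side glue; mymac_priv, mymac_validate and the probe fragment are illustrative, not taken from any in-tree driver:

struct mymac_priv {
	struct phylink_config phylink_config;
	/* ... MAC registers, clocks, etc. ... */
};

static void mymac_validate(struct phylink_config *config,
			   unsigned long *supported,
			   struct phylink_link_state *state)
{
	struct mymac_priv *priv =
		container_of(config, struct mymac_priv, phylink_config);

	/* consult priv and clear unsupported modes from *supported */
}

/* probe-time setup (assumed context: pdev, fwnode, phy_mode and the ops exist): */
priv->phylink_config.dev = &pdev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
phylink = phylink_create(&priv->phylink_config, fwnode, phy_mode,
			 &mymac_phylink_ops);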
pin will be set to weakly latch so that it @@ -54,6 +60,8 @@ * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. + * @PIN_CONFIG_DRIVE_STRENGTH_UA: the pin will sink or source at most the current + * passed as argument. The argument is in uA. * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, * which means it will wait for signals to settle when reading inputs. The * argument gives the debounce time in usecs. Setting the @@ -111,6 +119,7 @@ enum pin_config_param { PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, + PIN_CONFIG_DRIVE_STRENGTH_UA, PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, @@ -155,9 +164,6 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param return PIN_CONF_PACKED(param, argument); } -#ifdef CONFIG_GENERIC_PINCONF - -#ifdef CONFIG_DEBUG_FS #define PCONFDUMP(a, b, c, d) { \ .param = a, .display = b, .format = c, .has_arg = d \ } @@ -168,14 +174,6 @@ struct pin_config_item { const char * const format; bool has_arg; }; -#endif /* CONFIG_DEBUG_FS */ - -#ifdef CONFIG_OF - -#include <linux/device.h> -#include <linux/pinctrl/machine.h> -struct pinctrl_dev; -struct pinctrl_map; struct pinconf_generic_params { const char * const property; @@ -220,8 +218,5 @@ static inline int pinconf_generic_dt_node_to_map_all( return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_INVALID); } -#endif - -#endif /* CONFIG_GENERIC_PINCONF */ #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 514414a5ad01..f8a8215e9021 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -11,7 +11,7 @@ #ifndef __LINUX_PINCTRL_PINCONF_H #define __LINUX_PINCTRL_PINCONF_H -#ifdef CONFIG_PINCONF +#include <linux/types.h> struct pinctrl_dev; struct seq_file; @@ -64,6 +64,4 @@ struct pinconf_ops { unsigned long config); }; -#endif - #endif /* __LINUX_PINCTRL_PINCONF_H */ diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index a0e785815a64..635d97e9285e 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h @@ -3,6 +3,9 @@ * Standard pin control state definitions */ +#ifndef __LINUX_PINCTRL_PINCTRL_STATE_H +#define __LINUX_PINCTRL_PINCTRL_STATE_H + /** * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put * into as default, usually this means the pins are up and ready to @@ -31,3 +34,5 @@ #define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" + +#endif /* __LINUX_PINCTRL_PINCTRL_STATE_H */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index e429e5d92dd6..7ce23450a1cb 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -11,8 +11,6 @@ #ifndef __LINUX_PINCTRL_PINCTRL_H #define __LINUX_PINCTRL_PINCTRL_H -#ifdef CONFIG_PINCTRL - #include <linux/radix-tree.h> #include <linux/list.h> #include <linux/seq_file.h> @@ -124,6 +122,10 @@ struct pinctrl_ops { * the hardware description * @custom_conf_items: Information how to print @params in debugfs, must be * the same size as the @custom_params, i.e. @num_custom_params + * @link_consumers: If true create a device link between pinctrl and its + * consumers (i.e. the devices requesting pin control states). 
This is + * sometimes necessary to ascertain the right suspend/resume order for + * example. */ struct pinctrl_desc { const char *name; @@ -138,6 +140,7 @@ struct pinctrl_desc { const struct pinconf_generic_params *custom_params; const struct pin_config_item *custom_conf_items; #endif + bool link_consumers; }; /* External interface to pin controller */ @@ -166,7 +169,6 @@ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, extern void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev); -extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, @@ -197,16 +199,5 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev); extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); -#else - -struct pinctrl_dev; - -/* Sufficiently stupid default functions when pinctrl is not in use */ -static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) -{ - return pin >= 0; -} - -#endif /* !CONFIG_PINCTRL */ #endif /* __LINUX_PINCTRL_PINCTRL_H */ diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index e873ed97d79e..9a647fa5c8f1 100644 --- a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h @@ -15,8 +15,6 @@ #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl.h> -#ifdef CONFIG_PINMUX - struct pinctrl_dev; /** @@ -84,6 +82,4 @@ struct pinmux_ops { bool strict; }; -#endif /* CONFIG_PINMUX */ - #endif /* __LINUX_PINCTRL_PINMUX_H */ diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 80f9be858bd0..281adbb26e6b 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -52,7 +52,6 @@ struct imx_dma_data { int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; - struct device_node *of_node; }; static inline int imx_dma_is_ipu(struct dma_chan *chan) diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h deleted file mode 100644 index dea8d84448ec..000000000000 --- a/include/linux/platform_data/fsa9480.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2010 Samsung Electronics - * Minkyu Kang <mk7.kang@samsung.com> - */ - -#ifndef _FSA9480_H_ -#define _FSA9480_H_ - -#define FSA9480_ATTACHED 1 -#define FSA9480_DETACHED 0 - -struct fsa9480_platform_data { - void (*cfg_gpio) (void); - void (*usb_cb) (u8 attached); - void (*uart_cb) (u8 attached); - void (*charger_cb) (u8 attached); - void (*jig_cb) (u8 attached); - void (*reset_cb) (void); - void (*usb_power) (u8 on); - int wakeup; -}; - -#endif /* _FSA9480_H_ */ diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 17edc43201d2..8b30b14b47d3 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -186,7 +186,7 @@ struct omap_gpio_platform_data { bool is_mpuio; /* whether the bank is of type MPUIO */ u32 non_wakeup_gpios; - struct omap_gpio_reg_offs *regs; + const struct omap_gpio_reg_offs *regs; /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); diff --git 
a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h index 9f6ca406505b..5e4c2c272a73 100644 --- a/include/linux/platform_data/i2c-mux-gpio.h +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -19,10 +19,6 @@ * position * @n_values: Number of multiplexer positions (busses to instantiate) * @classes: Optional I2C auto-detection classes - * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given - * relative to the base GPIO number of that chip - * @gpios: Array of GPIO numbers used to control MUX - * @n_gpios: Number of GPIOs used to control MUX * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used */ struct i2c_mux_gpio_platform_data { @@ -31,9 +27,6 @@ struct i2c_mux_gpio_platform_data { const unsigned *values; int n_values; const unsigned *classes; - char *gpio_chip; - const unsigned *gpios; - int n_gpios; unsigned idle; }; diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h deleted file mode 100644 index 206ca4ecd93f..000000000000 --- a/include/linux/platform_data/keypad-w90p910.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_W90P910_KEYPAD_H -#define __ASM_ARCH_W90P910_KEYPAD_H - -#include <linux/input/matrix_keypad.h> - -extern void mfp_set_groupi(struct device *dev); - -struct w90p910_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - - unsigned int prescale; - unsigned int debounce; -}; - -#endif /* __ASM_ARCH_W90P910_KEYPAD_H */ diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h index d2d3a443eedf..53adaab64f28 100644 --- a/include/linux/platform_data/media/mmp-camera.h +++ b/include/linux/platform_data/media/mmp-camera.h @@ -12,11 +12,7 @@ enum dphy3_algo { }; struct mmp_camera_platform_data { - struct platform_device *i2c_device; - int sensor_power_gpio; - int sensor_reset_gpio; enum v4l2_mbus_type bus_type; - int mclk_min; /* The minimal value of MCLK */ int mclk_src; /* which clock source the MCLK derives from */ int mclk_div; /* Clock Divider Value for MCLK */ /* diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index 617a75336d56..f0e6d6483e62 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -11,8 +11,6 @@ /* Board specific platform_data */ struct mtk_chip_config { - u32 tx_mlsb; - u32 rx_mlsb; u32 cs_pol; u32 sample_sel; }; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 9256c0305968..0c587d4fc718 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -19,6 +19,7 @@ enum ti_sysc_module_type { struct ti_sysc_cookie { void *data; + void *clkdm; }; /** @@ -46,6 +47,10 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_HDQ1W BIT(17) +#define SYSC_MODULE_QUIRK_I2C BIT(16) +#define SYSC_MODULE_QUIRK_WDT BIT(15) +#define SYSS_QUIRK_RESETDONE_INVERTED BIT(14) #define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13) #define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) #define SYSC_QUIRK_SWSUP_SIDLE BIT(11) @@ -125,9 +130,16 @@ struct ti_sysc_module_data { }; struct device; +struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; + int (*init_clockdomain)(struct device *dev, struct clk *fck, + struct clk *ick, struct ti_sysc_cookie *cookie); + void (*clkdm_deny_idle)(struct device *dev, + const struct ti_sysc_cookie *cookie); + 
void (*clkdm_allow_idle)(struct device *dev, + const struct ti_sysc_cookie *cookie); int (*init_module)(struct device *dev, const struct ti_sysc_module_data *data, struct ti_sysc_cookie *cookie); diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h deleted file mode 100644 index 305ebaec3afd..000000000000 --- a/include/linux/platform_data/video-clcd-versatile.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef PLAT_CLCD_H -#define PLAT_CLCD_H - -#ifdef CONFIG_PLAT_VERSATILE_CLCD -struct clcd_panel *versatile_clcd_get_panel(const char *); -int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long); -int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *); -void versatile_clcd_remove_dma(struct clcd_fb *); -#else -static inline struct clcd_panel *versatile_clcd_get_panel(const char *s) -{ - return NULL; -} -static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) -{ - return -ENODEV; -} -static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm) -{ - return -ENODEV; -} -static inline void versatile_clcd_remove_dma(struct clcd_fb *fb) -{ -} -#endif - -#endif diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h index 1ff224793c99..ad03b586a095 100644 --- a/include/linux/platform_data/wilco-ec.h +++ b/include/linux/platform_data/wilco-ec.h @@ -13,12 +13,9 @@ /* Message flags for using the mailbox() interface */ #define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ -#define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */ /* Normal commands have a maximum 32 bytes of data */ #define EC_MAILBOX_DATA_SIZE 32 -/* Extended commands have 256 bytes of response data */ -#define EC_MAILBOX_DATA_SIZE_EXTENDED 256 /** * struct wilco_ec_device - Wilco Embedded Controller handle. @@ -32,6 +29,7 @@ * @data_size: Size of the data buffer used for EC communication. * @debugfs_pdev: The child platform_device used by the debugfs sub-driver. * @rtc_pdev: The child platform_device used by the RTC sub-driver. + * @telem_pdev: The child platform_device used by the telemetry sub-driver. */ struct wilco_ec_device { struct device *dev; @@ -43,6 +41,7 @@ struct wilco_ec_device { size_t data_size; struct platform_device *debugfs_pdev; struct platform_device *rtc_pdev; + struct platform_device *telem_pdev; }; /** @@ -85,14 +84,12 @@ struct wilco_ec_response { * enum wilco_ec_msg_type - Message type to select a set of command codes. * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior. * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property. - * @WILCO_EC_MSG_TELEMETRY_SHORT: 32 bytes of telemetry data provided by the EC. - * @WILCO_EC_MSG_TELEMETRY_LONG: 256 bytes of telemetry data provided by the EC. + * @WILCO_EC_MSG_TELEMETRY: Request telemetry data from the EC. */ enum wilco_ec_msg_type { WILCO_EC_MSG_LEGACY = 0x00f0, WILCO_EC_MSG_PROPERTY = 0x00f2, - WILCO_EC_MSG_TELEMETRY_SHORT = 0x00f5, - WILCO_EC_MSG_TELEMETRY_LONG = 0x00f6, + WILCO_EC_MSG_TELEMETRY = 0x00f5, }; /** @@ -123,4 +120,87 @@ struct wilco_ec_message { */ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg); +/* + * A Property is typically a data item that is stored to NVRAM + * by the EC. Each of these data items has an index associated + * with it, known as the Property ID (PID). 
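A minimal usage sketch for the byte-sized property helpers declared just below; the property ID value and the caller function are hypothetical, only the helpers and struct wilco_ec_device come from this header:

#include <linux/platform_data/wilco-ec.h>

#define EXAMPLE_PROPERTY_ID 0x0123	/* hypothetical Property ID (PID) */

/* Read a one-byte property and write back its complement. */
static int example_toggle_property(struct wilco_ec_device *ec)
{
	u8 val;
	int ret;

	ret = wilco_ec_get_byte_property(ec, EXAMPLE_PROPERTY_ID, &val);
	if (ret < 0)
		return ret;

	return wilco_ec_set_byte_property(ec, EXAMPLE_PROPERTY_ID, !val);
}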
Properties may have + variable lengths, up to a max of WILCO_EC_PROPERTY_MAX_SIZE + bytes. Properties can be simple integers, or they may be more + complex binary data. + */ + +#define WILCO_EC_PROPERTY_MAX_SIZE 4 + +/** + * struct wilco_ec_property_msg - Message to get or set a property. + * @property_id: Which property to get or set. + * @length: Number of bytes of |data| that are used. + * @data: Actual property data. + */ +struct wilco_ec_property_msg { + u32 property_id; + int length; + u8 data[WILCO_EC_PROPERTY_MAX_SIZE]; +}; + +/** + * wilco_ec_get_property() - Retrieve a property from the EC. + * @ec: Embedded Controller device. + * @prop_msg: Message for request and response. + * + * The property_id field of |prop_msg| should be filled before calling this + * function. The result will be stored in the data and length fields. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_get_property(struct wilco_ec_device *ec, + struct wilco_ec_property_msg *prop_msg); + +/** + * wilco_ec_set_property() - Store a property on the EC. + * @ec: Embedded Controller device. + * @prop_msg: Message for request and response. + * + * The property_id, length, and data fields of |prop_msg| should be + * filled before calling this function. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_set_property(struct wilco_ec_device *ec, + struct wilco_ec_property_msg *prop_msg); + +/** + * wilco_ec_get_byte_property() - Retrieve a byte-size property from the EC. + * @ec: Embedded Controller device. + * @property_id: Which property to retrieve. + * @val: The result value, will be filled by this function. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id, + u8 *val); + +/** + * wilco_ec_set_byte_property() - Store a byte-size property on the EC. + * @ec: Embedded Controller device. + * @property_id: Which property to store. + * @val: Value to store. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id, + u8 val); + +/** + * wilco_ec_add_sysfs() - Create sysfs entries + * @ec: Wilco EC device + * + * wilco_ec_remove_sysfs() needs to be called afterwards + * to perform the necessary cleanup. + * + * Return: 0 on success or negative error code on failure. + */ +int wilco_ec_add_sysfs(struct wilco_ec_device *ec); +void wilco_ec_remove_sysfs(struct wilco_ec_device *ec); + #endif /* WILCO_EC_H */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index bfba245636a7..4802cd2c7309 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -18,8 +18,8 @@ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ -#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ -#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ +#define ASUS_WMI_METHODID_DCTS 0x53544344 /* Device status (DCTS) */ +#define ASUS_WMI_METHODID_DSTS 0x53545344 /* Device status (DSTS) */ #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ?
*/ #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ @@ -57,6 +57,7 @@ #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 +#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 diff --git a/include/linux/platform_data/xilinx-ll-temac.h b/include/linux/platform_data/xilinx-ll-temac.h index 368530f98176..f4a68136afa6 100644 --- a/include/linux/platform_data/xilinx-ll-temac.h +++ b/include/linux/platform_data/xilinx-ll-temac.h @@ -4,6 +4,7 @@ #include <linux/if_ether.h> #include <linux/phy.h> +#include <linux/spinlock.h> struct ll_temac_platform_data { bool txcsum; /* Enable/disable TX checksum */ @@ -21,7 +22,7 @@ struct ll_temac_platform_data { * TEMAC IP block, the same mutex should be passed here, as * they share the same DCR bus bridge. */ - struct mutex *indirect_mutex; + spinlock_t *indirect_lock; /* DMA channel control setup */ u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */ u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index beb25f277889..9bc36b589827 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -4,7 +4,7 @@ * * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> * - * See Documentation/driver-model/ for more information. + * See Documentation/driver-api/driver-model/ for more information. */ #ifndef _PLATFORM_DEVICE_H_ diff --git a/include/linux/pm.h b/include/linux/pm.h index 345d74a727e3..3619a870eaa4 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -271,7 +271,7 @@ typedef struct pm_message { * actions to be performed by a device driver's callbacks generally depend on * the platform and subsystem the device belongs to. * - * Refer to Documentation/power/runtime_pm.txt for more information about the + * Refer to Documentation/power/runtime_pm.rst for more information about the * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() * callbacks in device runtime power management. 
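As a sketch of the callback wiring described above, a driver typically exposes its runtime PM handlers through a dev_pm_ops table; the foo_* names here are illustrative only:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the device into its low-power state */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* bring the device back to the fully functional state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

The driver's probe path would then call pm_runtime_enable() so the PM core may start invoking these callbacks.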
*/ @@ -760,7 +760,6 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -extern void dev_pm_skip_next_resume_phases(struct device *dev); extern bool dev_pm_may_skip_resume(struct device *dev); extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5f3a1ee9c4c2..af5021f27cb7 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -128,8 +128,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index); -void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev); +struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names); +void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); @@ -292,12 +292,12 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} -static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index) +static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names) { return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {} +static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 6ea1ae373d77..2aebbc5b9950 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -40,6 +40,8 @@ enum pm_qos_flags_status { #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 +#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0 +#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1) #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) @@ -58,6 +60,8 @@ struct pm_qos_flags_request { enum dev_pm_qos_req_type { DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_LATENCY_TOLERANCE, + DEV_PM_QOS_MIN_FREQUENCY, + DEV_PM_QOS_MAX_FREQUENCY, DEV_PM_QOS_FLAGS, }; @@ -99,10 +103,14 @@ struct pm_qos_flags { struct dev_pm_qos { struct pm_qos_constraints resume_latency; struct pm_qos_constraints latency_tolerance; + struct pm_qos_constraints min_frequency; + struct pm_qos_constraints max_frequency; struct pm_qos_flags flags; struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; + struct dev_pm_qos_request *min_frequency_req; + struct dev_pm_qos_request *max_frequency_req; }; /* Action requested 
to pm_qos_update_target */ @@ -139,16 +147,18 @@ s32 pm_qos_read_value(struct pm_qos_constraints *c); #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); -s32 __dev_pm_qos_read_value(struct device *dev); -s32 dev_pm_qos_read_value(struct device *dev); +s32 __dev_pm_qos_resume_latency(struct device *dev); +s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier); + struct notifier_block *notifier, + enum dev_pm_qos_req_type type); int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier); + struct notifier_block *notifier, + enum dev_pm_qos_req_type type); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -174,7 +184,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) return dev->power.qos->flags_req->data.flr.flags; } -static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : @@ -187,10 +197,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } -static inline s32 __dev_pm_qos_read_value(struct device *dev) - { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } -static inline s32 dev_pm_qos_read_value(struct device *dev) +static inline s32 __dev_pm_qos_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } +static inline s32 dev_pm_qos_read_value(struct device *dev, + enum dev_pm_qos_req_type type) +{ + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + case DEV_PM_QOS_MIN_FREQUENCY: + return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE; + case DEV_PM_QOS_MAX_FREQUENCY: + return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE; + default: + WARN_ON(1); + return 0; + } +} + static inline int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, @@ -202,10 +226,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { return 0; } static inline int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier) + struct notifier_block *notifier, + enum dev_pm_qos_req_type type) { return 0; } static inline int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier) + struct notifier_block *notifier, + enum dev_pm_qos_req_type type) { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { @@ -241,7 +267,7 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } -static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +static inline s32 
dev_pm_qos_raw_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index ce57771fab9b..91027602d137 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -36,7 +36,7 @@ struct wake_irq; * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. * @active: Status of the wakeup source. - * @has_timeout: The wakeup source has been activated with a timeout. + * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. */ struct wakeup_source { const char *name; diff --git a/include/linux/poison.h b/include/linux/poison.h index d6d980a681c7..df34330b4e34 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -21,7 +21,7 @@ * non-initialized list entries. */ #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) -#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ /* diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 6f348b3ee2e0..28413f737e7d 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -128,6 +128,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, @@ -480,4 +482,17 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) return 0; } +#ifdef CONFIG_POWER_SUPPLY_HWMON +int power_supply_add_hwmon_sysfs(struct power_supply *psy); +void power_supply_remove_hwmon_sysfs(struct power_supply *psy); +#else +static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) +{ + return 0; +} + +static inline +void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} +#endif + #endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 52a283ba0465..a705aa2d03f9 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -75,6 +75,15 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); +#ifdef CONFIG_PROC_PID_ARCH_STATUS +/* + * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must + * provide proc_pid_arch_status() definition. + */ +int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); +#endif /* CONFIG_PROC_PID_ARCH_STATUS */ + #else /* CONFIG_PROC_FS */ static inline void proc_root_init(void) diff --git a/include/linux/processor.h b/include/linux/processor.h index dbc952eec869..dc78bdc7079a 100644 --- a/include/linux/processor.h +++ b/include/linux/processor.h @@ -32,15 +32,6 @@ #define spin_cpu_relax() cpu_relax() #endif -/* - * spin_cpu_yield may be called to yield (undirected) to the hypervisor if - * necessary. This should be used if the wait is expected to take longer - * than context switch overhead, but we can't sleep or do a directed yield. 
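Stepping back to the device PM QoS changes above: with the new DEV_PM_QOS_MIN_FREQUENCY/DEV_PM_QOS_MAX_FREQUENCY request types a consumer can constrain a device's frequency range. A rough sketch, where the device pointer and the numeric value are placeholders:

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_min_freq_req;

static int example_constrain(struct device *dev)
{
	/* Ask that the device's frequency not drop below the given value. */
	return dev_pm_qos_add_request(dev, &example_min_freq_req,
				      DEV_PM_QOS_MIN_FREQUENCY, 200000);
}

static void example_release(struct device *dev)
{
	s32 max = dev_pm_qos_read_value(dev, DEV_PM_QOS_MAX_FREQUENCY);

	if (max != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		dev_info(dev, "another consumer caps the frequency at %d\n", max);

	dev_pm_qos_remove_request(&example_min_freq_req);
}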
- */ -#ifndef spin_cpu_yield -#define spin_cpu_yield() cpu_relax_yield() -#endif - #ifndef spin_end #define spin_end() #endif diff --git a/include/linux/property.h b/include/linux/property.h index e9caa290cda5..5a910ad79591 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -76,6 +76,10 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args); +struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, + const char *name, + unsigned int index); + struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_parent( struct fwnode_handle *fwnode); @@ -141,6 +145,26 @@ static inline int device_property_read_u64(struct device *dev, return device_property_read_u64_array(dev, propname, val, 1); } +static inline int device_property_count_u8(struct device *dev, const char *propname) +{ + return device_property_read_u8_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u16(struct device *dev, const char *propname) +{ + return device_property_read_u16_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u32(struct device *dev, const char *propname) +{ + return device_property_read_u32_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u64(struct device *dev, const char *propname) +{ + return device_property_read_u64_array(dev, propname, NULL, 0); +} + static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, const char *propname) { @@ -171,6 +195,30 @@ static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode, return fwnode_property_read_u64_array(fwnode, propname, val, 1); } +static inline int fwnode_property_count_u8(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u8_array(fwnode, propname, NULL, 0); +} + +static inline int fwnode_property_count_u16(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u16_array(fwnode, propname, NULL, 0); +} + +static inline int fwnode_property_count_u32(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u32_array(fwnode, propname, NULL, 0); +} + +static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u64_array(fwnode, propname, NULL, 0); +} + /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. 
@@ -329,7 +377,54 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, /* -------------------------------------------------------------------------- */ /* Software fwnode support - when HW description is incomplete or missing */ +struct software_node; + +/** + * struct software_node_ref_args - Reference with additional arguments + * @node: Reference to a software node + * @nargs: Number of elements in @args array + * @args: Integer arguments + */ +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[NR_FWNODE_REFERENCE_ARGS]; +}; + +/** + * struct software_node_reference - Named software node reference property + * @name: Name of the property + * @nrefs: Number of elements in @refs array + * @refs: Array of references with optional arguments + */ +struct software_node_reference { + const char *name; + unsigned int nrefs; + const struct software_node_ref_args *refs; +}; + +/** + * struct software_node - Software node description + * @name: Name of the software node + * @parent: Parent of the software node + * @properties: Array of device properties + * @references: Array of software node reference properties + */ +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; + const struct software_node_reference *references; +}; + bool is_software_node(const struct fwnode_handle *fwnode); +const struct software_node *to_software_node(struct fwnode_handle *fwnode); +struct fwnode_handle *software_node_fwnode(const struct software_node *node); + +int software_node_register_nodes(const struct software_node *nodes); +void software_node_unregister_nodes(const struct software_node *nodes); + +int software_node_register(const struct software_node *node); int software_node_notify(struct device *dev, unsigned long action); diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h new file mode 100644 index 000000000000..eceda1d1407a --- /dev/null +++ b/include/linux/pseudo_fs.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_PSEUDO_FS__ +#define __LINUX_PSEUDO_FS__ + +#include <linux/fs_context.h> + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler **xattr; + const struct dentry_operations *dops; + unsigned long magic; +}; + +struct pseudo_fs_context *init_pseudo(struct fs_context *fc, + unsigned long magic); + +#endif diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 28eb9c792522..93cc4f1d444a 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -213,6 +213,14 @@ extern void ptp_clock_event(struct ptp_clock *ptp, extern int ptp_clock_index(struct ptp_clock *ptp); /** + * scaled_ppm_to_ppb() - convert scaled ppm to ppb + * + * @ppm: Parts per million, but with a 16 bit binary fractional field + */ + +extern s32 scaled_ppm_to_ppb(long ppm); + +/** * ptp_find_pin() - obtain the pin index of a given auxiliary function * * @ptp: The clock obtained from ptp_clock_register(). 
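A short sketch of how the device_property_count_*() helpers introduced in the property.h hunk above pair with the array readers; the property name "vendor,levels" is made up for the example:

#include <linux/property.h>
#include <linux/slab.h>

static int example_read_levels(struct device *dev)
{
	u32 *levels;
	int n, ret;

	/* The helper reads with a NULL buffer, which returns the element count. */
	n = device_property_count_u32(dev, "vendor,levels");
	if (n <= 0)
		return n ? n : -ENODATA;

	levels = kcalloc(n, sizeof(*levels), GFP_KERNEL);
	if (!levels)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, "vendor,levels", levels, n);
	/* ... use the values ... */
	kfree(levels);
	return ret;
}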
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index d5084ebd9f03..2a9df80ea887 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -355,7 +355,7 @@ static inline void user_single_step_report(struct pt_regs *regs) info.si_code = SI_USER; info.si_pid = 0; info.si_uid = 0; - force_sig_info(info.si_signo, &info, current); + force_sig_info(&info); } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index eaa5c6e3fc9f..24632a7a7d11 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -405,12 +405,16 @@ struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args); struct pwm_device *pwm_get(struct device *dev, const char *con_id); -struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); +struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np, + const char *con_id); void pwm_put(struct pwm_device *pwm); struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); +struct pwm_device *devm_fwnode_pwm_get(struct device *dev, + struct fwnode_handle *fwnode, + const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) @@ -493,7 +497,8 @@ static inline struct pwm_device *pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } -static inline struct pwm_device *of_pwm_get(struct device_node *np, +static inline struct pwm_device *of_pwm_get(struct device *dev, + struct device_node *np, const char *con_id) { return ERR_PTR(-ENODEV); @@ -516,6 +521,13 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } +static inline struct pwm_device * +devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} + static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 48841e5dab90..eef02e64b422 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -907,7 +907,8 @@ struct qed_common_ops { u32 (*sb_release)(struct qed_dev *cdev, struct qed_sb_info *sb_info, - u16 sb_id); + u16 sb_id, + enum qed_sb_type type); void (*simd_handler_config)(struct qed_dev *cdev, void *token, @@ -1123,6 +1124,13 @@ struct qed_common_ops { */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); + +/** + * @brief get_affin_hwfn_idx + * + * @param cdev + */ + u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index d15f8e4815e3..898f595ea3d6 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -670,6 +670,8 @@ struct qed_rdma_ops { int (*ll2_set_mac_filter)(struct qed_dev *cdev, u8 *old_mac_address, u8 *new_mac_address); + int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset); + int (*iwarp_connect)(void *rdma_cxt, struct qed_iwarp_connect_in *iparams, struct qed_iwarp_connect_out *oparams); diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 5ef7d54caac2..ee582bdb7fda 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -19,7 +19,6 @@ extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); extern const struct file_operations 
ramfs_file_operations; extern const struct vm_operations_struct generic_file_vm_ops; -extern int __init init_ramfs_fs(void); int ramfs_fill_super(struct super_block *sb, void *data, int silent); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index e6337fce08f2..1fd61a9af45c 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -32,25 +32,9 @@ struct rb_root { struct rb_node *rb_node; }; -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) #define RB_ROOT (struct rb_root) { NULL, } -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *); extern struct rb_node *rb_first(const struct rb_root *); extern struct rb_node *rb_last(const struct rb_root *); -extern void rb_insert_color_cached(struct rb_node *, - struct rb_root_cached *, bool); -extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); -/* Same as rb_first(), but O(1) */ -#define rb_first_cached(root) (root)->rb_leftmost - /* Postorder iteration - always visit the parent after its children */ extern struct rb_node *rb_first_postorder(const struct rb_root *); extern struct rb_node *rb_next_postorder(const struct rb_node *); @@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root); -extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) @@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. 
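For reference, the usual insertion pattern with the cached variant (now provided by the inline helpers just below) tracks whether the new node became the leftmost one; struct item and its key are illustrative:

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root_cached example_tree = RB_ROOT_CACHED;

static void example_insert(struct item *new)
{
	struct rb_node **link = &example_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, &example_tree, leftmost);
}

rb_first_cached(&example_tree) then returns the smallest key in O(1), and rb_erase_cached() keeps the cached leftmost pointer up to date on removal.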
+ */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +static inline void rb_insert_color_cached(struct rb_node *node, + struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline void rb_erase_cached(struct rb_node *node, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); +} + +static inline void rb_replace_node_cached(struct rb_node *victim, + struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} + #endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 0f902ccb48b0..179faab29f52 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -30,10 +30,9 @@ struct rb_augment_callbacks { void (*rotate)(struct rb_node *old, struct rb_node *new); }; -extern void __rb_insert_augmented(struct rb_node *node, - struct rb_root *root, - bool newleft, struct rb_node **leftmost, +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + /* * Fixup the rbtree and update the augmented information when rebalancing. * @@ -48,7 +47,7 @@ static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, root, false, NULL, augment->rotate); + __rb_insert_augmented(node, root, augment->rotate); } static inline void @@ -56,8 +55,9 @@ rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *root, bool newleft, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, &root->rb_root, - newleft, &root->rb_leftmost, augment->rotate); + if (newleft) + root->rb_leftmost = node; + rb_insert_augmented(node, &root->rb_root, augment); } #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ @@ -150,7 +150,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, static __always_inline struct rb_node * __rb_erase_augmented(struct rb_node *node, struct rb_root *root, - struct rb_node **leftmost, const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right; @@ -158,9 +157,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, struct rb_node *parent, *rebalance; unsigned long pc; - if (leftmost && node == *leftmost) - *leftmost = rb_next(node); - if (!tmp) { /* * Case 1: node to erase has no more than 1 child (easy!) 
@@ -260,8 +256,7 @@ static __always_inline void rb_erase_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, root, - NULL, augment); + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } @@ -270,11 +265,9 @@ static __always_inline void rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, - augment); - if (rebalance) - __rb_erase_color(rebalance, &root->rb_root, augment->rotate); + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase_augmented(node, &root->rb_root, augment); } #endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index 6fc53a1345b3..9b83865d24f9 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -13,62 +13,44 @@ #include <linux/wait.h> #include <linux/rcupdate.h> -enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; - /* Structure to mediate between updaters and fastpath-using readers. */ struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; - int cb_state; struct rcu_head cb_head; - - enum rcu_sync_type gp_type; }; -extern void rcu_sync_lockdep_assert(struct rcu_sync *); - /** * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? * @rsp: Pointer to rcu_sync structure to use for synchronization * - * Returns true if readers are permitted to use their fastpaths. - * Must be invoked within an RCU read-side critical section whose - * flavor matches that of the rcu_sync struture. + * Returns true if readers are permitted to use their fastpaths. Must be + * invoked within some flavor of RCU read-side critical section. 
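The simplified rcu_sync structure keeps its usual usage pattern: readers check rcu_sync_is_idle() (whose body follows below) inside an RCU read-side critical section to pick a lock-free fast path, while writers bracket updates with rcu_sync_enter()/rcu_sync_exit() to push readers onto the slow path. A rough sketch, with the fast/slow path bodies left as placeholders:

#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>

static DEFINE_RCU_SYNC(example_rss);

static void example_reader(void)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&example_rss)) {
		/* fast path: no writer active, skip the heavyweight locking */
	} else {
		/* slow path: fall back to the shared lock */
	}
	rcu_read_unlock();
}

static void example_writer(void)
{
	rcu_sync_enter(&example_rss);	/* forces readers onto the slow path */
	/* perform the update */
	rcu_sync_exit(&example_rss);
}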
*/ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) { -#ifdef CONFIG_PROVE_RCU - rcu_sync_lockdep_assert(rsp); -#endif - return !rsp->gp_state; /* GP_IDLE */ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && + !rcu_read_lock_bh_held() && + !rcu_read_lock_sched_held(), + "suspicious rcu_sync_is_idle() usage"); + return !READ_ONCE(rsp->gp_state); /* GP_IDLE */ } -extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_init(struct rcu_sync *); extern void rcu_sync_enter_start(struct rcu_sync *); extern void rcu_sync_enter(struct rcu_sync *); extern void rcu_sync_exit(struct rcu_sync *); extern void rcu_sync_dtor(struct rcu_sync *); -#define __RCU_SYNC_INITIALIZER(name, type) { \ +#define __RCU_SYNC_INITIALIZER(name) { \ .gp_state = 0, \ .gp_count = 0, \ .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ - .cb_state = 0, \ - .gp_type = type, \ } -#define __DEFINE_RCU_SYNC(name, type) \ - struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) - -#define DEFINE_RCU_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_SYNC) - -#define DEFINE_RCU_SCHED_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) - -#define DEFINE_RCU_BH_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) +#define DEFINE_RCU_SYNC(name) \ + struct rcu_sync name = __RCU_SYNC_INITIALIZER(name) #endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b25d20822e75..8f7167478c1d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -365,16 +365,15 @@ static inline void rcu_preempt_sleep_check(void) { } * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ -({ \ +do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ - rcu_check_sparse(p, __rcu); \ + rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ - _r_a_p__v; \ -}) +} while (0) /** * rcu_swap_protected() - swap an RCU and a regular pointer @@ -586,7 +585,7 @@ static inline void rcu_preempt_sleep_check(void) { } * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ -static inline void rcu_read_lock(void) +static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); @@ -803,7 +802,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree - * @rcu_head: the name of the struct rcu_head within the type of @ptr. + * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore @@ -826,9 +825,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. 
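The reworked macro (just below) now also tolerates a NULL pointer; typical usage only requires that the freed structure embeds a struct rcu_head and that the field name is passed as the second argument:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example {
	int payload;
	struct rcu_head rcu;
};

static void example_release(struct example *p)
{
	/* Frees @p after a grace period; "rcu" names the rcu_head member. */
	kfree_rcu(p, rcu);
}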
*/ -#define kfree_rcu(ptr, rcu_head) \ - __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) - +#define kfree_rcu(ptr, rhf) \ +do { \ + typeof (ptr) ___p = (ptr); \ + \ + if (___p) \ + __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ +} while (0) /* * Place this after a lock-acquisition primitive to guarantee that diff --git a/include/linux/regmap.h b/include/linux/regmap.h index d3dea823af8e..dfe493ac692d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -22,6 +22,7 @@ struct module; struct clk; struct device; struct i2c_client; +struct i3c_device; struct irq_domain; struct slim_device; struct spi_device; @@ -109,7 +110,7 @@ struct reg_sequence { * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read @@ -151,7 +152,7 @@ struct reg_sequence { * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read @@ -621,6 +622,10 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); /* * Wrapper for regmap_init macros to include a unique lockdep key and name * for each call. No-op if CONFIG_LOCKDEP is not set. @@ -979,6 +984,21 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); #define devm_regmap_init_slimbus(slimbus, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \ slimbus, config) + +/** + * devm_regmap_init_i3c() - Initialise managed register map + * + * @i3c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_i3c(i3c, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \ + i3c, config) + int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); void regmap_mmio_detach_clk(struct regmap *map); void regmap_exit(struct regmap *map); diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h new file mode 100644 index 000000000000..0212d6255e4e --- /dev/null +++ b/include/linux/regulator/coupler.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * coupler.h -- SoC Regulator support, coupler API. + * + * Regulator Coupler Interface. 
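A hypothetical coupler built on the interface declared below might claim only the regulators it recognizes and leave voltage balancing to its own callback; all example_* names and the "example-core" regulator name are invented for illustration:

#include <linux/init.h>
#include <linux/string.h>
#include <linux/regulator/coupler.h>

static int example_attach(struct regulator_coupler *coupler,
			  struct regulator_dev *rdev)
{
	/* Return 1 for regulators this coupler does not handle. */
	if (strcmp(rdev_get_name(rdev), "example-core"))
		return 1;

	return 0;
}

static int example_balance(struct regulator_coupler *coupler,
			   struct regulator_dev *rdev,
			   suspend_state_t state)
{
	/* Adjust the voltages of the coupled regulators here. */
	return 0;
}

static struct regulator_coupler example_coupler = {
	.attach_regulator = example_attach,
	.balance_voltage = example_balance,
};

static int __init example_coupler_init(void)
{
	return regulator_coupler_register(&example_coupler);
}
arch_initcall(example_coupler_init);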
+ */ + +#ifndef __LINUX_REGULATOR_COUPLER_H_ +#define __LINUX_REGULATOR_COUPLER_H_ + +#include <linux/kernel.h> +#include <linux/suspend.h> + +struct regulator_coupler; +struct regulator_dev; + +/** + * struct regulator_coupler - customized regulator's coupler + * + * Regulator's coupler allows to customize coupling algorithm. + * + * @list: couplers list entry + * @attach_regulator: Callback invoked on creation of a coupled regulator, + * couples are unresolved at this point. The callee should + * check that it could handle the regulator and return 0 on + * success, -errno on failure and 1 if given regulator is + * not suitable for this coupler (case of having multiple + * regulators in a system). Callback shall be implemented. + * @detach_regulator: Callback invoked on destruction of a coupled regulator. + * This callback is optional and could be NULL. + * @balance_voltage: Callback invoked when voltage of a coupled regulator is + * changing. Called with all of the coupled rdev's being held + * under "consumer lock". The callee should perform voltage + * balancing, changing voltage of the coupled regulators as + * needed. It's up to the coupler to verify the voltage + * before changing it in hardware, i.e. coupler should + * check consumer's min/max and etc. This callback is + * optional and could be NULL, in which case a generic + * voltage balancer will be used. + */ +struct regulator_coupler { + struct list_head list; + + int (*attach_regulator)(struct regulator_coupler *coupler, + struct regulator_dev *rdev); + int (*detach_regulator)(struct regulator_coupler *coupler, + struct regulator_dev *rdev); + int (*balance_voltage)(struct regulator_coupler *coupler, + struct regulator_dev *rdev, + suspend_state_t state); +}; + +#ifdef CONFIG_REGULATOR +int regulator_coupler_register(struct regulator_coupler *coupler); +const char *rdev_get_name(struct regulator_dev *rdev); +int regulator_check_consumers(struct regulator_dev *rdev, + int *min_uV, int *max_uV, + suspend_state_t state); +int regulator_check_voltage(struct regulator_dev *rdev, + int *min_uV, int *max_uV); +int regulator_get_voltage_rdev(struct regulator_dev *rdev); +int regulator_set_voltage_rdev(struct regulator_dev *rdev, + int min_uV, int max_uV, + suspend_state_t state); +#else +static inline int regulator_coupler_register(struct regulator_coupler *coupler) +{ + return 0; +} +static inline const char *rdev_get_name(struct regulator_dev *rdev) +{ + return NULL; +} +static inline int regulator_check_consumers(struct regulator_dev *rdev, + int *min_uV, int *max_uV, + suspend_state_t state) +{ + return -EINVAL; +} +static inline int regulator_check_voltage(struct regulator_dev *rdev, + int *min_uV, int *max_uV) +{ + return -EINVAL; +} +static inline int regulator_get_voltage_rdev(struct regulator_dev *rdev) +{ + return -EINVAL; +} +static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev, + int min_uV, int max_uV, + suspend_state_t state) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d45ab52c91c9..9a911bb5fb61 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -12,8 +12,6 @@ #ifndef __LINUX_REGULATOR_DRIVER_H_ #define __LINUX_REGULATOR_DRIVER_H_ -#define MAX_COUPLED 2 - #include <linux/device.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> @@ -283,6 +281,11 @@ enum regulator_type { * @vsel_range_mask: Mask for register bitfield used for range selector * @vsel_reg: Register 
for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector + * @vsel_step: Specify the resolution of selector stepping when setting + * voltage. If 0, then no stepping is done (requested selector is + * set directly), if >0 then the regulator API will ramp the + * voltage up/down gradually each time increasing/decreasing the + * selector by the specified step value. * @csel_reg: Register for current limit selector using regmap set_current_limit * @csel_mask: Mask for register bitfield used for current limit selector * @apply_reg: Register for initiate voltage change on the output when @@ -357,6 +360,7 @@ struct regulator_desc { unsigned int vsel_range_mask; unsigned int vsel_reg; unsigned int vsel_mask; + unsigned int vsel_step; unsigned int csel_reg; unsigned int csel_mask; unsigned int apply_reg; @@ -423,7 +427,8 @@ struct regulator_config { * incremented. */ struct coupling_desc { - struct regulator_dev *coupled_rdevs[MAX_COUPLED]; + struct regulator_dev **coupled_rdevs; + struct regulator_coupler *coupler; int n_resolved; int n_coupled; }; @@ -549,4 +554,5 @@ void regulator_unlock(struct regulator_dev *rdev); */ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, unsigned int selector); + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 5539efa76d26..a84cc8879c3e 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -153,7 +153,7 @@ struct regulation_constraints { int system_load; /* used for coupled regulators */ - int max_spread; + u32 *max_spread; /* used for changing voltage in steps */ int max_uV_step; diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h index ebd99d2e62ad..8712c091abf0 100644 --- a/include/linux/regulator/max8952.h +++ b/include/linux/regulator/max8952.h @@ -105,9 +105,6 @@ enum { #define MAX8952_NUM_DVS_MODE 4 struct max8952_platform_data { - int gpio_vid0; - int gpio_vid1; - u32 default_mode; u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 04d04709f2bd..16ad66683ad0 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -100,7 +100,9 @@ struct fw_rsc_hdr { * the remote processor will be writing logs. * @RSC_VDEV: declare support for a virtio device, and serve as its * virtio header. - * @RSC_LAST: just keep this one at the end + * @RSC_LAST: just keep this one at the end of standard resources + * @RSC_VENDOR_START: start of the vendor specific resource types range + * @RSC_VENDOR_END: end of the vendor specific resource types range * * For more details regarding a specific resource type, please see its * dedicated structure below. @@ -111,11 +113,13 @@ struct fw_rsc_hdr { * please update it as needed. 
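With the vendor resource range and the handle_rsc() hook introduced just below, a platform driver can claim its own resource-table entries; the vendor type value and the example_* names are made up:

#include <linux/remoteproc.h>

#define RSC_VENDOR_EXAMPLE	(RSC_VENDOR_START + 1)	/* hypothetical vendor type */

static int example_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc,
			      int offset, int avail)
{
	if (rsc_type != RSC_VENDOR_EXAMPLE)
		return RSC_IGNORED;	/* not ours, let the core decide */

	/* parse the vendor-specific entry pointed to by @rsc (avail bytes) */
	return RSC_HANDLED;
}

static int example_start(struct rproc *rproc) { return 0; }
static int example_stop(struct rproc *rproc) { return 0; }

static const struct rproc_ops example_rproc_ops = {
	.start = example_start,
	.stop = example_stop,
	.handle_rsc = example_handle_rsc,
};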
*/ enum fw_resource_type { - RSC_CARVEOUT = 0, - RSC_DEVMEM = 1, - RSC_TRACE = 2, - RSC_VDEV = 3, - RSC_LAST = 4, + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, }; #define FW_RSC_ADDR_ANY (-1) @@ -340,12 +344,26 @@ struct rproc_mem_entry { struct firmware; /** + * enum rsc_handling_status - return status of rproc_ops handle_rsc hook + * @RSC_HANDLED: resource was handled + * @RSC_IGNORED: resource was ignored + */ +enum rsc_handling_status { + RSC_HANDLED = 0, + RSC_IGNORED = 1, +}; + +/** * struct rproc_ops - platform-specific device handlers * @start: power on the device and boot it * @stop: power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) + * @handle_rsc: optional platform hook to handle vendor resources. Should return + * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a + * negative value on error + * @load_rsc_table: load resource table from firmware image * @find_loaded_rsc_table: find the loaded resouce table * @load: load firmware to memory, where the remote processor * expects to find it @@ -358,6 +376,8 @@ struct rproc_ops { void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, int len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); + int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, + int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); diff --git a/include/linux/reservation.h b/include/linux/reservation.h index ee750765cc94..644a22dbe53b 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -216,8 +216,12 @@ reservation_object_unlock(struct reservation_object *obj) { #ifdef CONFIG_DEBUG_MUTEXES /* Test shared fence slot reservation */ - if (obj->fence) - obj->fence->shared_max = obj->fence->shared_count; + if (rcu_access_pointer(obj->fence)) { + struct reservation_object_list *fence = + reservation_object_get_list(obj); + + fence->shared_max = fence->shared_count; + } #endif ww_mutex_unlock(&obj->lock); } diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 9f8bc06d4136..beb9a9da1699 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -352,37 +352,38 @@ static inline void rht_unlock(struct bucket_table *tbl, static inline struct rhash_head __rcu *__rht_ptr( struct rhash_lock_head *const *bkt) { - return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0)); + return (struct rhash_head __rcu *) + ((unsigned long)*bkt & ~BIT(0) ?: + (unsigned long)RHT_NULLS_MARKER(bkt)); } /* * Where 'bkt' is a bucket and might be locked: - * rht_ptr() dereferences that pointer and clears the lock bit. + * rht_ptr_rcu() dereferences that pointer and clears the lock bit. + * rht_ptr() dereferences in a context where the bucket is locked. * rht_ptr_exclusive() dereferences in a context where exclusive * access is guaranteed, such as when destroying the table. 
*/ +static inline struct rhash_head *rht_ptr_rcu( + struct rhash_lock_head *const *bkt) +{ + struct rhash_head __rcu *p = __rht_ptr(bkt); + + return rcu_dereference(p); +} + static inline struct rhash_head *rht_ptr( struct rhash_lock_head *const *bkt, struct bucket_table *tbl, unsigned int hash) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - if (!p) - return RHT_NULLS_MARKER(bkt); - - return rht_dereference_bucket_rcu(p, tbl, hash); + return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); } static inline struct rhash_head *rht_ptr_exclusive( struct rhash_lock_head *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - if (!p) - return RHT_NULLS_MARKER(bkt); - - return rcu_dereference_protected(p, 1); + return rcu_dereference_protected(__rht_ptr(bkt), 1); } static inline void rht_assign_locked(struct rhash_lock_head **bkt, @@ -509,7 +510,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, */ #define rht_for_each_rcu(pos, tbl, hash) \ for (({barrier(); }), \ - pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash); \ + pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) @@ -546,8 +547,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, */ #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ rht_for_each_entry_rcu_from(tpos, pos, \ - rht_ptr(rht_bucket(tbl, hash), \ - tbl, hash), \ + rht_ptr_rcu(rht_bucket(tbl, hash)), \ tbl, hash, member) /** @@ -603,7 +603,7 @@ restart: hash = rht_key_hashfn(ht, tbl, key, params); bkt = rht_bucket(tbl, hash); do { - rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { + rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { if (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 2ea18a3def04..9d9c663987d8 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -34,12 +34,13 @@ */ struct rw_semaphore { atomic_long_t count; -#ifdef CONFIG_RWSEM_SPIN_ON_OWNER /* - * Write owner. Used as a speculative check to see - * if the owner is running on the cpu. + * Write owner or one of the read owners as well flags regarding + * the current state of the rwsem. Can be used as a speculative + * check to see if the write owner is running on the cpu. */ - struct task_struct *owner; + atomic_long_t owner; +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* spinner MCS lock */ #endif raw_spinlock_t wait_lock; @@ -50,10 +51,10 @@ struct rw_semaphore { }; /* - * Setting bit 1 of the owner field but not bit 0 will indicate + * Setting all bits of the owner field except bit 0 will indicate * that the rwsem is writer-owned with an unknown owner. 
*/ -#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) +#define RWSEM_OWNER_UNKNOWN (-2L) /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) @@ -73,13 +74,14 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) #endif #ifdef CONFIG_RWSEM_SPIN_ON_OWNER -#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL +#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED #else #define __RWSEM_OPT_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ { __RWSEM_INIT_COUNT(name), \ + .owner = ATOMIC_LONG_INIT(0), \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ @@ -158,7 +160,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/locking/lockdep-design.txt for more details.) + * See Documentation/locking/lockdep-design.rst for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 30a9a55c28ba..6eec50fb36c8 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -266,10 +266,11 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents, typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); -void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); +void __sg_free_table(struct sg_table *, unsigned int, unsigned int, + sg_free_fn *); void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, - struct scatterlist *, gfp_t, sg_alloc_fn *); + struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, @@ -331,9 +332,11 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, #endif #ifdef CONFIG_SG_POOL -void sg_free_table_chained(struct sg_table *table, bool first_chunk); +void sg_free_table_chained(struct sg_table *table, + unsigned nents_first_chunk); int sg_alloc_table_chained(struct sg_table *table, int nents, - struct scatterlist *first_chunk); + struct scatterlist *first_chunk, + unsigned nents_first_chunk); #endif /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 11837410690f..9f51932bd543 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -35,6 +35,7 @@ struct audit_context; struct backing_dev_info; struct bio_list; struct blk_plug; +struct capture_control; struct cfs_rq; struct fs_struct; struct futex_pi_state; @@ -47,8 +48,9 @@ struct pid_namespace; struct pipe_inode_info; struct rcu_node; struct reclaim_state; -struct capture_control; struct robust_list_head; +struct root_domain; +struct rq; struct sched_attr; struct sched_param; struct seq_file; @@ -281,6 +283,18 @@ struct vtime { u64 gtime; }; +/* + * Utilization clamp constraints. 
+ * @UCLAMP_MIN: Minimum utilization + * @UCLAMP_MAX: Maximum utilization + * @UCLAMP_CNT: Utilization clamp constraints count + */ +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX, + UCLAMP_CNT +}; + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -312,6 +326,10 @@ struct sched_info { # define SCHED_FIXEDPOINT_SHIFT 10 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) +/* Increase resolution of cpu_capacity calculations */ +# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + struct load_weight { unsigned long weight; u32 inv_weight; @@ -560,12 +578,47 @@ struct sched_dl_entity { struct hrtimer inactive_timer; }; +#ifdef CONFIG_UCLAMP_TASK +/* Number of utilization clamp buckets (shorter alias) */ +#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT + +/* + * Utilization clamp for a scheduling entity + * @value: clamp value "assigned" to a se + * @bucket_id: bucket index corresponding to the "assigned" value + * @active: the se is currently refcounted in a rq's bucket + * @user_defined: the requested clamp value comes from user-space + * + * The bucket_id is the index of the clamp bucket matching the clamp value + * which is pre-computed and stored to avoid expensive integer divisions from + * the fast path. + * + * The active bit is set whenever a task has got an "effective" value assigned, + * which can be different from the clamp value "requested" from user-space. + * This allows to know a task is refcounted in the rq's bucket corresponding + * to the "effective" bucket_id. + * + * The user_defined bit is set whenever a task has got a task-specific clamp + * value requested from userspace, i.e. the system defaults apply to this task + * just as a restriction. This allows to relax default clamps when a less + * restrictive task-specific value has been requested, thus allowing to + * implement a "nice" semantic. For example, a task running with a 20% + * default boost can still drop its own boosting to 0%. + */ +struct uclamp_se { + unsigned int value : bits_per(SCHED_CAPACITY_SCALE); + unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); + unsigned int active : 1; + unsigned int user_defined : 1; +}; +#endif /* CONFIG_UCLAMP_TASK */ + union rcu_special { struct { u8 blocked; u8 need_qs; u8 exp_hint; /* Hint for performance. */ - u8 pad; /* No garbage from compiler! */ + u8 deferred_qs; } b; /* Bits. */ u32 s; /* Set of bits. */ }; @@ -640,6 +693,13 @@ struct task_struct { #endif struct sched_dl_entity dl; +#ifdef CONFIG_UCLAMP_TASK + /* Clamp values requested for a scheduling entity */ + struct uclamp_se uclamp_req[UCLAMP_CNT]; + /* Effective clamp values used for a scheduling entity */ + struct uclamp_se uclamp[UCLAMP_CNT]; +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS /* List of struct preempt_notifier: */ struct hlist_head preempt_notifiers; @@ -651,7 +711,8 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; - cpumask_t cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; @@ -831,6 +892,11 @@ struct task_struct { /* Effective (overridable) subjective task credentials (COW): */ const struct cred __rcu *cred; +#ifdef CONFIG_KEYS + /* Cached requested key. */ + struct key *cached_requested_key; +#endif + /* * executable name, excluding path. 
* @@ -1026,7 +1092,15 @@ struct task_struct { u64 last_sum_exec_runtime; struct callback_head numa_work; - struct numa_group *numa_group; + /* + * This pointer is only modified for current in syscall and + * pagefault context (and for tasks being destroyed), so it can be read + * from any of the following contexts: + * - RCU read-side critical section + * - current->numa_group from everywhere + * - task's runqueue locked, task not running + */ + struct numa_group __rcu *numa_group; /* * numa_faults is an array split into four regions: @@ -1399,7 +1473,7 @@ extern struct pid *cad_pid; #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ @@ -1518,10 +1592,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma } #endif -#ifndef cpu_relax_yield -#define cpu_relax_yield() cpu_relax() -#endif - extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); @@ -1919,4 +1989,16 @@ static inline void rseq_syscall(struct pt_regs *regs) #endif +const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); +char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); +int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); + +const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); + +int sched_trace_rq_cpu(struct rq *rq); + +const struct cpumask *sched_trace_rd_span(struct root_domain *rd); + #endif diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index b0fb1446fe04..6c8512d3be88 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -19,6 +19,7 @@ enum hk_flags { DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); extern int housekeeping_any_cpu(enum hk_flags flags); extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); +extern bool housekeeping_enabled(enum hk_flags flags); extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); extern void __init housekeeping_init(void); @@ -35,6 +36,11 @@ static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) return cpu_possible_mask; } +static inline bool housekeeping_enabled(enum hk_flags flags) +{ + return false; +} + static inline void housekeeping_affine(struct task_struct *t, enum hk_flags flags) { } static inline void housekeeping_init(void) { } diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index b36f4cf38111..1abe91ff6e4a 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -7,14 +7,6 @@ */ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void 
cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); extern int get_nohz_timer_target(void); #else diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index e7dd04a84ba8..3988762efe15 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -19,7 +19,7 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); +extern void task_numa_free(struct task_struct *p, bool final); extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu); #else @@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p) static inline void set_numabalancing_state(bool enabled) { } -static inline void task_numa_free(struct task_struct *p) +static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 38a0f0785323..efd8ce7675ed 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -15,10 +15,10 @@ */ struct sighand_struct { - refcount_t count; - struct k_sigaction action[_NSIG]; spinlock_t siglock; + refcount_t count; wait_queue_head_t signalfd_wqh; + struct k_sigaction action[_NSIG]; }; /* @@ -307,16 +307,19 @@ static inline void kernel_signal_stop(void) # define ___ARCH_SI_IA64(_a1, _a2, _a3) #endif -int force_sig_fault(int sig, int code, void __user *addr +int force_sig_fault_to_task(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); +int force_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); int send_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); -int force_sig_mceerr(int code, void __user *, short, struct task_struct *); +int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); @@ -325,17 +328,17 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_ptrace_errno_trap(int errno, void __user *addr); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); -extern void force_sigsegv(int sig, struct task_struct *p); -extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern void force_sigsegv(int sig); +extern int force_sig_info(struct kernel_siginfo *); extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *, +extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool 
do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); +extern void force_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); @@ -417,7 +420,6 @@ void task_join_group_stop(struct task_struct *task); static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) @@ -448,7 +450,6 @@ static inline bool test_and_clear_restore_sigmask(void) static inline void set_restore_sigmask(void) { current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { @@ -481,6 +482,16 @@ static inline void restore_saved_sigmask(void) __set_current_blocked(¤t->saved_sigmask); } +extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); + +static inline void restore_saved_sigmask_unless(bool interrupted) +{ + if (interrupted) + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + else + restore_saved_sigmask(); +} + static inline sigset_t *sigmask_to_save(void) { sigset_t *res = ¤t->blocked; diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 99ce6d728df7..d4f6215ee03f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -56,6 +56,11 @@ int sched_proc_update_handler(struct ctl_table *table, int write, extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; +#ifdef CONFIG_UCLAMP_TASK +extern unsigned int sysctl_sched_uclamp_util_min; +extern unsigned int sysctl_sched_uclamp_util_max; +#endif + #ifdef CONFIG_CFS_BANDWIDTH extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif @@ -75,6 +80,12 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +#ifdef CONFIG_UCLAMP_TASK +extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index f1227f2c38a4..0497091e40c1 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -8,11 +8,26 @@ */ #include <linux/sched.h> +#include <linux/uaccess.h> struct task_struct; struct rusage; union thread_union; +/* All the bits taken by the old clone syscall. 
*/ +#define CLONE_LEGACY_FLAGS 0xffffffffULL + +struct kernel_clone_args { + u64 flags; + int __user *pidfd; + int __user *child_tid; + int __user *parent_tid; + int exit_signal; + unsigned long stack; + unsigned long stack_size; + unsigned long tls; +}; + /* * This serializes "schedule()" and also protects * the run-queue from deletions/modifications (but @@ -73,7 +88,8 @@ extern void do_group_exit(int); extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long _do_fork(struct kernel_clone_args *kargs); +extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index cfc0a89a7159..7863bb62d2ab 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -7,12 +7,6 @@ #include <linux/sched/idle.h> /* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -/* * sched-domains (multiprocessor balancing) declarations: */ #ifdef CONFIG_SMP @@ -84,11 +78,6 @@ struct sched_domain { unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ @@ -201,14 +190,6 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl); # define SD_INIT_NAME(type) #endif -#ifndef arch_scale_cpu_capacity -static __always_inline -unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) -{ - return SCHED_CAPACITY_SCALE; -} -#endif - #else /* CONFIG_SMP */ struct sched_domain_attr; @@ -224,16 +205,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +#endif /* !CONFIG_SMP */ + #ifndef arch_scale_cpu_capacity static __always_inline -unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) +unsigned long arch_scale_cpu_capacity(int cpu) { return SCHED_CAPACITY_SCALE; } #endif -#endif /* !CONFIG_SMP */ - static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 468d2565a9fe..917d88edb7b9 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -7,8 +7,6 @@ #include <linux/refcount.h> #include <linux/ratelimit.h> -struct key; - /* * Some day this will be a full-fledged user tracking system.. */ @@ -30,18 +28,6 @@ struct user_struct { unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ -#ifdef CONFIG_KEYS - /* - * These pointers can only change from NULL to a non-NULL value once. - * Writes are protected by key_user_keyring_mutex. - * Unlocked readers should use READ_ONCE() unless they know that - * install_user_keyrings() has been called successfully (which sets - * these members to non-NULL values, preventing further modifications). 
- */ - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ -#endif - /* Hash table maintenance information */ struct hlist_node uidhash_node; kuid_t uid; diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index ad826d2a4557..26a2013ac39c 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -51,6 +51,11 @@ static inline void wake_q_init(struct wake_q_head *head) head->lastp = &head->first; } +static inline bool wake_q_empty(struct wake_q_head *head) +{ + return head->first == WAKE_Q_TAIL; +} + extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 3105055c00a7..9ff2e9357e9a 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -144,6 +144,7 @@ struct scmi_power_ops { struct scmi_sensor_info { u32 id; u8 type; + s8 scale; char name[SCMI_MAX_STR_SIZE]; }; diff --git a/include/linux/security.h b/include/linux/security.h index 659071c2e57c..5f7441abbf42 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -189,9 +189,9 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) #ifdef CONFIG_SECURITY -int call_lsm_notifier(enum lsm_event event, void *data); -int register_lsm_notifier(struct notifier_block *nb); -int unregister_lsm_notifier(struct notifier_block *nb); +int call_blocking_lsm_notifier(enum lsm_event event, void *data); +int register_blocking_lsm_notifier(struct notifier_block *nb); +int unregister_blocking_lsm_notifier(struct notifier_block *nb); /* prototypes */ extern int security_init(void); @@ -394,17 +394,17 @@ int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); #else /* CONFIG_SECURITY */ -static inline int call_lsm_notifier(enum lsm_event event, void *data) +static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return 0; } -static inline int register_lsm_notifier(struct notifier_block *nb) +static inline int register_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } -static inline int unregister_lsm_notifier(struct notifier_block *nb) +static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index 3e76b6d7d97f..53c28d750a45 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -39,6 +39,9 @@ static inline bool is_sed_ioctl(unsigned int cmd) case IOC_OPAL_ENABLE_DISABLE_MBR: case IOC_OPAL_ERASE_LR: case IOC_OPAL_SECURE_ERASE_LR: + case IOC_OPAL_PSID_REVERT_TPR: + case IOC_OPAL_MBR_DONE: + case IOC_OPAL_WRITE_SHADOW_MBR: return true; } return false; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a121982af0f5..5998e1f4ff06 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -127,6 +127,7 @@ void seq_put_hex_ll(struct seq_file *m, const char *delimiter, unsigned long long v, unsigned int width); void seq_escape(struct seq_file *m, const char *s, const char *esc); +void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, 
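[Editor's note, not part of the patch: the include/linux/sched/task.h hunk above replaces _do_fork()'s six-argument prototype with a single struct kernel_clone_args and adds legacy_clone_args_valid(). A minimal illustrative sketch of how a legacy clone-style entry point might be adapted, assuming the conventional mapping of the old arguments onto the new fields; hypothetical_clone() and its parameters are hypothetical, and CSIGNAL is the usual mask for the exit-signal bits of the clone flags:

	static long hypothetical_clone(unsigned long clone_flags, unsigned long newsp,
				       int __user *parent_tidptr, int __user *child_tidptr,
				       unsigned long tls)
	{
		struct kernel_clone_args args = {
			/* CSIGNAL bits carry the exit signal, not clone flags proper */
			.flags		= (clone_flags & ~CSIGNAL),
			.child_tid	= child_tidptr,
			.parent_tid	= parent_tidptr,
			.exit_signal	= (clone_flags & CSIGNAL),
			.stack		= newsp,
			.tls		= tls,
		};

		/* Reject combinations the legacy argument layout cannot express. */
		if (!legacy_clone_args_valid(&args))
			return -EINVAL;

		return _do_fork(&args);
	}
]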
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 5e0b59422a68..bb2bc99388ca 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -110,6 +110,7 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; + struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 05b179015d6c..2b78cc734719 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -32,7 +32,7 @@ struct device; /* * This structure describes all the operations that can be done on the - * physical hardware. See Documentation/serial/driver.rst for details. + * physical hardware. See Documentation/driver-api/serial/driver.rst for details. */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d9d9de3fcf8e..1c35428e98bc 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -464,11 +464,14 @@ enum { struct fwnode_handle; struct ethtool_eeprom; struct ethtool_modinfo; -struct net_device; struct sfp_bus; /** * struct sfp_upstream_ops - upstream operations structure + * @attach: called when the sfp socket driver is bound to the upstream + * (mandatory). + * @detach: called when the sfp socket driver is unbound from the upstream + * (mandatory). * @module_insert: called after a module has been detected to determine * whether the module is supported for the upstream device. * @module_remove: called after the module has been removed. @@ -481,6 +484,8 @@ struct sfp_bus; * been removed. */ struct sfp_upstream_ops { + void (*attach)(void *priv, struct sfp_bus *bus); + void (*detach)(void *priv, struct sfp_bus *bus); int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); void (*module_remove)(void *priv); void (*link_down)(void *priv); @@ -504,7 +509,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, - struct net_device *ndev, void *upstream, + void *upstream, const struct sfp_upstream_ops *ops); void sfp_unregister_upstream(struct sfp_bus *bus); #else @@ -549,8 +554,7 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus) } static inline struct sfp_bus *sfp_register_upstream( - struct fwnode_handle *fwnode, - struct net_device *ndev, void *upstream, + struct fwnode_handle *fwnode, void *upstream, const struct sfp_upstream_ops *ops) { return (struct sfp_bus *)-1; diff --git a/include/linux/signal.h b/include/linux/signal.h index 78c2bb376954..b5d99482d3fe 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -273,10 +273,6 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); -extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, - sigset_t *oldset, size_t sigsetsize); -extern void restore_user_sigmask(const void __user *usigmask, - sigset_t *sigsaved, bool interrupted); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; diff --git a/include/linux/siox.h b/include/linux/siox.h index a860cb8c1f9d..da7225bf1877 100644 --- a/include/linux/siox.h +++ b/include/linux/siox.h @@ -72,3 
+72,13 @@ static inline void siox_driver_unregister(struct siox_driver *sdriver) { return driver_unregister(&sdriver->driver); } + +/* + * module_siox_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_siox_driver(__siox_driver) \ + module_driver(__siox_driver, siox_driver_register, \ + siox_driver_unregister) diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 1cbb4c4d016e..9874f6f67537 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -44,5 +44,6 @@ #define SZ_2G 0x80000000 #define SZ_4G _AC(0x100000000, ULL) +#define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 056f557d5194..d8af86d995d6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1024,6 +1024,7 @@ static inline bool skb_unref(struct sk_buff *skb) void skb_release_head_state(struct sk_buff *skb); void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); void consume_skb(struct sk_buff *skb); void __consume_stateless_skb(struct sk_buff *skb); @@ -1059,6 +1060,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, int max_page_order, int *errcode, gfp_t gfp_mask); +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); /* Layout of fast clones : [skb1][skb2][fclone_ref] */ struct sk_buff_fclones { @@ -1319,6 +1321,20 @@ skb_flow_dissect_flow_keys_basic(const struct net *net, data, proto, nhoff, hlen, flags); } +void skb_flow_dissect_meta(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + +/* Gets a skb connection tracking info, ctinfo map should be a + * a map of mapsize to translate enum ip_conntrack_info states + * to user states. 
+ */ +void +skb_flow_dissect_ct(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + u16 *ctinfo_map, + size_t mapsize); void skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, @@ -3441,6 +3457,10 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto); +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto); +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); +int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); @@ -3914,18 +3934,16 @@ static inline bool __skb_checksum_convert_check(struct sk_buff *skb) return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); } -static inline void __skb_checksum_convert(struct sk_buff *skb, - __sum16 check, __wsum pseudo) +static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) { skb->csum = ~pseudo; skb->ip_summed = CHECKSUM_COMPLETE; } -#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +#define skb_checksum_try_convert(skb, proto, compute_pseudo) \ do { \ if (__skb_checksum_convert_check(skb)) \ - __skb_checksum_convert(skb, check, \ - compute_pseudo(skb, proto)); \ + __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ } while (0) static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 50ced8aba9db..e4b3fb4bb77c 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk, sk->sk_write_space = psock->saved_write_space; if (psock->sk_proto) { - sk->sk_prot = psock->sk_proto; + struct inet_connection_sock *icsk = inet_csk(sk); + bool has_ulp = !!icsk->icsk_ulp_data; + + if (has_ulp) + tcp_update_ulp(sk, psock->sk_proto); + else + sk->sk_prot = psock->sk_proto; psock->sk_proto = NULL; } } diff --git a/include/linux/slab.h b/include/linux/slab.h index 9449b19c5f10..56c9c7eed34e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -16,6 +16,7 @@ #include <linux/overflow.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <linux/percpu-refcount.h> /* @@ -115,6 +116,10 @@ /* Objects are reclaimable */ #define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ + +/* Slab deactivation flag */ +#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U) + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * @@ -151,8 +156,7 @@ void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *); -void memcg_destroy_kmem_caches(struct mem_cgroup *); +void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); /* * Please use this macro to create slab caches. 
Simply specify the @@ -184,6 +188,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); void kzfree(const void *); +size_t __ksize(const void *); size_t ksize(const void *); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR @@ -641,11 +646,12 @@ struct memcg_cache_params { struct mem_cgroup *memcg; struct list_head children_node; struct list_head kmem_caches_node; + struct percpu_ref refcnt; - void (*deact_fn)(struct kmem_cache *); + void (*work_fn)(struct kmem_cache *); union { - struct rcu_head deact_rcu_head; - struct work_struct deact_work; + struct rcu_head rcu_head; + struct work_struct work; }; }; }; diff --git a/include/linux/smp.h b/include/linux/smp.h index a56f08ff3097..6fc856c9eda5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -35,7 +35,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, /* * Call a function on all processors */ -int on_each_cpu(smp_call_func_t func, void *info, int wait); +void on_each_cpu(smp_call_func_t func, void *info, int wait); /* * Call a function on processors specified by mask, which might include @@ -101,7 +101,7 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -int smp_call_function(smp_call_func_t func, void *info, int wait); +void smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); @@ -144,9 +144,8 @@ static inline void smp_send_stop(void) { } * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 -static inline int up_smp_call_function(smp_call_func_t func, void *info) +static inline void up_smp_call_function(smp_call_func_t func, void *info) { - return 0; } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) @@ -181,29 +180,46 @@ static inline int get_boot_cpu_id(void) #endif /* !SMP */ -/* - * smp_processor_id(): get the current CPU ID. +/** + * raw_processor_id() - get the current (unstable) CPU id + * + * For then you know what you are doing and need an unstable + * CPU id. + */ + +/** + * smp_processor_id() - get the current (stable) CPU id + * + * This is the normal accessor to the CPU id and should be used + * whenever possible. * - * if DEBUG_PREEMPT is enabled then we check whether it is - * used in a preemption-safe way. (smp_processor_id() is safe - * if it's used in a preemption-off critical section, or in - * a thread that is bound to the current CPU.) + * The CPU id is stable when: * - * NOTE: raw_smp_processor_id() is for internal use only - * (smp_processor_id() is the preferred variant), but in rare - * instances it might also be used to turn off false positives - * (i.e. smp_processor_id() use that the debugging code reports but - * which use for some reason is legal). Don't use this to hack around - * the warning message, as your code might not work under PREEMPT. + * - IRQs are disabled; + * - preemption is disabled; + * - the task is CPU affine. + * + * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN + * when smp_processor_id() is used when the CPU id is not stable. + */ + +/* + * Allow the architecture to differentiate between a stable and unstable read. + * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a + * regular asm read for the stable. 
*/ +#ifndef __smp_processor_id +#define __smp_processor_id(x) raw_smp_processor_id(x) +#endif + #ifdef CONFIG_DEBUG_PREEMPT extern unsigned int debug_smp_processor_id(void); # define smp_processor_id() debug_smp_processor_id() #else -# define smp_processor_id() raw_smp_processor_id() +# define smp_processor_id() __smp_processor_id() #endif -#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) +#define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) #define put_cpu() preempt_enable() /* diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index 944b06aefb0f..e600baec6825 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -21,4 +21,6 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); + #endif diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 568722a041bf..6c610e188a44 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -166,29 +166,29 @@ struct ti_sci_dev_ops { * managed by driver for that purpose. */ struct ti_sci_clk_ops { - int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool needs_ssc, bool can_change_freq, bool enable_input_term); - int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); - int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); - int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); + int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); + int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state); - int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*is_on)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); - int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*is_off)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); - int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, - u8 parent_id); - int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, - u8 *parent_id); + int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, + u32 parent_id); + int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, + u32 *parent_id); int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, - u8 cid, u8 *num_parents); + u32 cid, u32 *num_parents); int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, - u8 cid, u64 min_freq, u64 target_freq, + u32 cid, u64 min_freq, u64 target_freq, u64 max_freq, u64 *match_freq); - int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 min_freq, u64 target_freq, u64 max_freq); - int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 *current_freq); }; @@ -241,12 +241,254 @@ struct ti_sci_rm_irq_ops { u16 global_event, u8 vint_status_bit); }; +/* RA config.addr_lo parameter is valid for RM ring configure 
TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0) +/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1) + /* RA config.count parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2) +/* RA config.mode parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3) +/* RA config.size parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4) +/* RA config.order_id parameter is valid for RM ring configure TISCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5) + +#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \ + (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID) + +/** + * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations + * @config: configure the SoC Navigator Subsystem Ring Accelerator ring + * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring + * configuration + */ +struct ti_sci_rm_ringacc_ops { + int (*config)(const struct ti_sci_handle *handle, + u32 valid_params, u16 nav_id, u16 index, + u32 addr_lo, u32 addr_hi, u32 count, u8 mode, + u8 size, u8 order_id + ); + int (*get_config)(const struct ti_sci_handle *handle, + u32 nav_id, u32 index, u8 *mode, + u32 *addr_lo, u32 *addr_hi, u32 *count, + u8 *size, u8 *order_id); +}; + +/** + * struct ti_sci_rm_psil_ops - PSI-L thread operations + * @pair: pair PSI-L source thread to a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is updated. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is updated. + * @unpair: unpair PSI-L source thread from a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is cleared. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is cleared. 
+ */ +struct ti_sci_rm_psil_ops { + int (*pair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); + int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); +}; + +/* UDMAP channel types */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13 + +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0 +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2 + +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1 +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2 +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3 + +/* UDMAP TX/RX channel valid_params common declarations */ +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14) + +/** + * Configures a Navigator Subsystem UDMAP transmit channel + * + * Configures a Navigator Subsystem UDMAP transmit channel registers. + * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_tx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13) + u16 nav_id; + u16 index; + u8 tx_pause_on_err; + u8 tx_filt_einfo; + u8 tx_filt_pswords; + u8 tx_atype; + u8 tx_chan_type; + u8 tx_supr_tdpkt; + u16 tx_fetch_size; + u8 tx_credit_count; + u16 txcq_qnum; + u8 tx_priority; + u8 tx_qos; + u8 tx_orderid; + u16 fdepth; + u8 tx_sched_priority; + u8 tx_burst_size; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive channel + * + * Configures a Navigator Subsystem UDMAP receive channel registers. + * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_rx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12) + u16 nav_id; + u16 index; + u16 rx_fetch_size; + u16 rxcq_qnum; + u8 rx_priority; + u8 rx_qos; + u8 rx_orderid; + u8 rx_sched_priority; + u16 flowid_start; + u16 flowid_cnt; + u8 rx_pause_on_err; + u8 rx_atype; + u8 rx_chan_type; + u8 rx_ignore_short; + u8 rx_ignore_long; + u8 rx_burst_size; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive flow + * + * Configures a Navigator Subsystem UDMAP receive flow's registers. 
+ * See @tis_ci_msg_rm_udmap_flow_cfg_req + */ +struct ti_sci_msg_rm_udmap_flow_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18) + u16 nav_id; + u16 flow_index; + u8 rx_einfo_present; + u8 rx_psinfo_present; + u8 rx_error_handling; + u8 rx_desc_type; + u16 rx_sop_offset; + u16 rx_dest_qnum; + u8 rx_src_tag_hi; + u8 rx_src_tag_lo; + u8 rx_dest_tag_hi; + u8 rx_dest_tag_lo; + u8 rx_src_tag_hi_sel; + u8 rx_src_tag_lo_sel; + u8 rx_dest_tag_hi_sel; + u8 rx_dest_tag_lo_sel; + u16 rx_fdq0_sz0_qnum; + u16 rx_fdq1_qnum; + u16 rx_fdq2_qnum; + u16 rx_fdq3_qnum; + u8 rx_ps_location; +}; + +/** + * struct ti_sci_rm_udmap_ops - UDMA Management operations + * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel. + * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel. + * @rx_flow_cfg1: configure SoC Navigator Subsystem UDMA receive flow. + */ +struct ti_sci_rm_udmap_ops { + int (*tx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params); + int (*rx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params); + int (*rx_flow_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_flow_cfg *params); +}; + +/** + * struct ti_sci_proc_ops - Processor Control operations + * @request: Request to control a physical processor. 
The requesting host + * should be in the processor access list + * @release: Relinquish a physical processor control + * @handover: Handover a physical processor control to another host + * in the permitted list + * @set_config: Set base configuration of a processor + * @set_control: Setup limited control flags in specific cases + * @get_status: Get the state of physical processor + * + * NOTE: The following paramteres are generic in nature for all these ops, + * -handle: Pointer to TI SCI handle as retrieved by *ti_sci_get_handle + * -pid: Processor ID + * -hid: Host ID + */ +struct ti_sci_proc_ops { + int (*request)(const struct ti_sci_handle *handle, u8 pid); + int (*release)(const struct ti_sci_handle *handle, u8 pid); + int (*handover)(const struct ti_sci_handle *handle, u8 pid, u8 hid); + int (*set_config)(const struct ti_sci_handle *handle, u8 pid, + u64 boot_vector, u32 cfg_set, u32 cfg_clr); + int (*set_control)(const struct ti_sci_handle *handle, u8 pid, + u32 ctrl_set, u32 ctrl_clr); + int (*get_status)(const struct ti_sci_handle *handle, u8 pid, + u64 *boot_vector, u32 *cfg_flags, u32 *ctrl_flags, + u32 *status_flags); +}; + /** * struct ti_sci_ops - Function support for TI SCI * @dev_ops: Device specific operations * @clk_ops: Clock specific operations * @rm_core_ops: Resource management core operations. * @rm_irq_ops: IRQ management specific operations + * @proc_ops: Processor Control specific operations */ struct ti_sci_ops { struct ti_sci_core_ops core_ops; @@ -254,6 +496,10 @@ struct ti_sci_ops { struct ti_sci_clk_ops clk_ops; struct ti_sci_rm_core_ops rm_core_ops; struct ti_sci_rm_irq_ops rm_irq_ops; + struct ti_sci_rm_ringacc_ops rm_ring_ops; + struct ti_sci_rm_psil_ops rm_psil_ops; + struct ti_sci_rm_udmap_ops rm_udmap_ops; + struct ti_sci_proc_ops proc_ops; }; /** diff --git a/include/linux/socket.h b/include/linux/socket.h index b57cd8bf96e2..97523818cb14 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -12,6 +12,7 @@ struct pid; struct cred; +struct socket; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -374,6 +375,12 @@ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); +extern long __sys_sendmsg_sock(struct socket *sock, + struct user_msghdr __user *msg, + unsigned int flags); +extern long __sys_recvmsg_sock(struct socket *sock, + struct user_msghdr __user *msg, + unsigned int flags); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 35662d9c2c62..bea46bd8b6ce 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -41,6 +41,31 @@ struct sdw_slave; #define SDW_DAI_ID_RANGE_START 100 #define SDW_DAI_ID_RANGE_END 200 +enum { + SDW_PORT_DIRN_SINK = 0, + SDW_PORT_DIRN_SOURCE, + SDW_PORT_DIRN_MAX, +}; + +/* + * constants for flow control, ports and transport + * + * these are bit masks as devices can have multiple capabilities + */ + +/* + * flow modes for SDW port. These can be isochronous, tx controlled, + * rx controlled or async + */ +#define SDW_PORT_FLOW_MODE_ISOCH 0 +#define SDW_PORT_FLOW_MODE_TX_CNTRL BIT(0) +#define SDW_PORT_FLOW_MODE_RX_CNTRL BIT(1) +#define SDW_PORT_FLOW_MODE_ASYNC GENMASK(1, 0) + +/* sample packaging for block. 
It can be per port or per channel */ +#define SDW_BLOCK_PACKG_PER_PORT BIT(0) +#define SDW_BLOCK_PACKG_PER_CH BIT(1) + /** * enum sdw_slave_status - Slave status * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus. @@ -76,6 +101,14 @@ enum sdw_command_response { SDW_CMD_FAIL_OTHER = 4, }; +/* block group count enum */ +enum sdw_dpn_grouping { + SDW_BLK_GRP_CNT_1 = 0, + SDW_BLK_GRP_CNT_2 = 1, + SDW_BLK_GRP_CNT_3 = 2, + SDW_BLK_GRP_CNT_4 = 3, +}; + /** * enum sdw_stream_type: data stream type * @@ -100,6 +133,26 @@ enum sdw_data_direction { SDW_DATA_DIR_TX = 1, }; +/** + * enum sdw_port_data_mode: Data Port mode + * + * @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received + * and transmitted. + * @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of + * logic 1. The encoding will result in signal transitions at every bitslot + * owned by this Port + * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of + * logic 0. The encoding will result in no signal transitions + * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce + * a pseudo random data pattern that is transferred + */ +enum sdw_port_data_mode { + SDW_PORT_DATA_MODE_NORMAL = 0, + SDW_PORT_DATA_MODE_STATIC_1 = 1, + SDW_PORT_DATA_MODE_STATIC_0 = 2, + SDW_PORT_DATA_MODE_PRBS = 3, +}; + /* * SDW properties, defined in MIPI DisCo spec v1.0 */ @@ -153,10 +206,11 @@ enum sdw_clk_stop_mode { * (inclusive) * @num_words: number of wordlengths supported * @words: wordlengths supported - * @flow_controlled: Slave implementation results in an OK_NotReady + * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required - * @device_interrupts: If implementation-defined interrupts are supported + * @imp_def_interrupts: If set, each bit corresponds to support for + * implementation-defined interrupts * * The wordlengths are specified by Spec as max, min AND number of * discrete values, implementation can define based on the wordlengths they @@ -167,9 +221,9 @@ struct sdw_dp0_prop { u32 min_word; u32 num_words; u32 *words; - bool flow_controlled; + bool BRA_flow_controlled; bool simple_ch_prep_sm; - bool device_interrupts; + bool imp_def_interrupts; }; /** @@ -219,7 +273,7 @@ struct sdw_dpn_audio_mode { * @simple_ch_prep_sm: If the port supports simplified channel prepare state * machine * @ch_prep_timeout: Port-specific timeout value, in milliseconds - * @device_interrupts: If set, each bit corresponds to support for + * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported @@ -244,7 +298,7 @@ struct sdw_dpn_prop { u32 max_grouping; bool simple_ch_prep_sm; u32 ch_prep_timeout; - u32 device_interrupts; + u32 imp_def_interrupts; u32 max_ch; u32 min_ch; u32 num_ch; @@ -311,36 +365,32 @@ struct sdw_slave_prop { /** * struct sdw_master_prop - Master properties * @revision: MIPI spec version of the implementation - * @master_count: Number of masters - * @clk_stop_mode: Bitmap for Clock Stop modes supported - * @max_freq: Maximum Bus clock frequency, in Hz + * @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported + * @max_clk_freq: Maximum Bus clock frequency, in Hz * @num_clk_gears: Number of clock gears supported * @clk_gears: Clock gears supported - * @num_freq: Number of clock frequencies supported, in Hz - * @freq: Clock frequencies supported, in 
Hz + * @num_clk_freq: Number of clock frequencies supported, in Hz + * @clk_freq: Clock frequencies supported, in Hz * @default_frame_rate: Controller default Frame rate, in Hz * @default_row: Number of rows * @default_col: Number of columns - * @dynamic_frame: Dynamic frame supported + * @dynamic_frame: Dynamic frame shape supported * @err_threshold: Number of times that software may retry sending a single * command - * @dpn_prop: Data Port N properties */ struct sdw_master_prop { u32 revision; - u32 master_count; - enum sdw_clk_stop_mode clk_stop_mode; - u32 max_freq; + u32 clk_stop_modes; + u32 max_clk_freq; u32 num_clk_gears; u32 *clk_gears; - u32 num_freq; - u32 *freq; + u32 num_clk_freq; + u32 *clk_freq; u32 default_frame_rate; u32 default_row; u32 default_col; bool dynamic_frame; u32 err_threshold; - struct sdw_dpn_prop *dpn_prop; }; int sdw_master_read_prop(struct sdw_bus *bus); diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h index 9c756b5a0dfe..aaa7f4267c14 100644 --- a/include/linux/soundwire/sdw_type.h +++ b/include/linux/soundwire/sdw_type.h @@ -16,4 +16,15 @@ void sdw_unregister_driver(struct sdw_driver *drv); int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); +/** + * module_sdw_driver() - Helper macro for registering a Soundwire driver + * @__sdw_driver: soundwire slave driver struct + * + * Helper macro for Soundwire drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_sdw_driver(__sdw_driver) \ + module_driver(__sdw_driver, sdw_register_driver, \ + sdw_unregister_driver) #endif /* __SOUNDWIRE_TYPES_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 053abd22ad31..af4f265d0f67 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -109,6 +109,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, * This may be changed by the device's driver, or left at the * default (0) indicating protocol words are eight bit bytes. * The spi_transfer.bits_per_word can override this for each transfer. + * @rt: Make the pump thread real time priority. * @irq: Negative, or the number passed to request_irq() to receive * interrupts from this device. * @controller_state: Controller's runtime state @@ -143,6 +144,7 @@ struct spi_device { u32 max_speed_hz; u8 chip_select; u8 bits_per_word; + bool rt; u32 mode; #define SPI_CPHA 0x01 /* clock phase */ #define SPI_CPOL 0x02 /* clock polarity */ @@ -735,6 +737,9 @@ extern void spi_res_release(struct spi_controller *ctlr, * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. * @cs_change: affects chipselect after this transfer completes + * @cs_change_delay: delay between cs deassert and assert when + * @cs_change is set and @spi_transfer is not the last in @spi_message + * @cs_change_delay_unit: unit of cs_change_delay * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. @@ -742,6 +747,9 @@ extern void spi_res_release(struct spi_controller *ctlr, * (set by bits_per_word) transmission. * @word_delay: clock cycles to inter word delay after each word size * (set by bits_per_word) transmission. 
+ * @effective_speed_hz: the effective SCK-speed that was used to + * transfer this transfer. Set to 0 if the spi bus driver does + * not support it. * @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use @@ -824,9 +832,16 @@ struct spi_transfer { u8 bits_per_word; u8 word_delay_usecs; u16 delay_usecs; + u16 cs_change_delay; + u8 cs_change_delay_unit; +#define SPI_DELAY_UNIT_USECS 0 +#define SPI_DELAY_UNIT_NSECS 1 +#define SPI_DELAY_UNIT_SCK 2 u32 speed_hz; u16 word_delay; + u32 effective_speed_hz; + struct list_head transfer_list; }; @@ -967,6 +982,8 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } +extern void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold, u8 inactive_dly); + extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, @@ -997,6 +1014,26 @@ spi_max_transfer_size(struct spi_device *spi) return min(tr_max, msg_max); } +/** + * spi_is_bpw_supported - Check if bits per word is supported + * @spi: SPI device + * @bpw: Bits per word + * + * This function checks to see if the SPI controller supports @bpw. + * + * Returns: + * True if @bpw is supported, false otherwise. + */ +static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) +{ + u32 bpw_mask = spi->master->bits_per_word_mask; + + if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw))) + return true; + + return false; +} + /*---------------------------------------------------------------------------*/ /* SPI transfer replacement methods which make use of spi_res */ diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 7f7c8c050f63..9cfcc8a756ae 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -120,9 +120,17 @@ struct srcu_struct { * * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
*/ -#define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\ - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_data) +#ifdef MODULE +# define __DEFINE_SRCU(name, is_static) \ + is_static struct srcu_struct name; \ + struct srcu_struct * const __srcu_struct_##name \ + __section("___srcu_struct_ptrs") = &name +#else +# define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \ + is_static struct srcu_struct name = \ + __SRCU_STRUCT_INIT(name, name##_srcu_data) +#endif #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 1a0bb622cf10..7d06241582dd 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -78,14 +78,9 @@ /* Platfrom data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { - int (*phy_reset)(void *priv); unsigned int phy_mask; int *irqs; int probed_phy_irq; -#ifdef CONFIG_OF - int reset_gpio, active_low; - u32 delays[3]; -#endif }; struct stmmac_dma_cfg { @@ -137,6 +132,7 @@ struct plat_stmmacenet_data { int interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; + struct device_node *phylink_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 6d3635c86dbe..f9a0c6189852 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -36,6 +36,7 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); +void stop_machine_yield(const struct cpumask *cpumask); #else /* CONFIG_SMP */ diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index d23c5030901a..c28955132234 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -54,6 +54,9 @@ static inline int string_unescape_any_inplace(char *buf) int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned int flags, const char *only); +int string_escape_mem_ascii(const char *src, size_t isz, char *dst, + size_t osz); + static inline int string_escape_mem_any_np(const char *src, size_t isz, char *dst, size_t osz, const char *only) { diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h deleted file mode 100644 index cccc0a665d26..000000000000 --- a/include/linux/sudmac.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header for the SUDMAC driver - * - * Copyright (C) 2013 Renesas Solutions Corp. 
- */ -#ifndef SUDMAC_H -#define SUDMAC_H - -#include <linux/dmaengine.h> -#include <linux/shdma-base.h> -#include <linux/types.h> - -/* Used by slave DMA clients to request DMA to/from a specific peripheral */ -struct sudmac_slave { - struct shdma_slave shdma_slave; /* Set by the platform */ -}; - -/* - * Supplied by platforms to specify, how a DMA channel has to be configured for - * a certain peripheral - */ -struct sudmac_slave_config { - int slave_id; -}; - -struct sudmac_channel { - unsigned long offset; - unsigned long config; - unsigned long wait; /* The configuable range is 0 to 3 */ - unsigned long dint_end_bit; -}; - -struct sudmac_pdata { - const struct sudmac_slave_config *slave; - int slave_num; - const struct sudmac_channel *channel; - int channel_num; -}; - -/* Definitions for the sudmac_channel.config */ -#define SUDMAC_TX_BUFFER_MODE BIT(0) -#define SUDMAC_RX_END_MODE BIT(1) - -/* Definitions for the sudmac_channel.dint_end_bit */ -#define SUDMAC_DMA_BIT_CH0 BIT(0) -#define SUDMAC_DMA_BIT_CH1 BIT(1) - -#endif diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index d4229a78524a..87d27e13d885 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -43,6 +43,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); +unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); /* * Determine if a shared backchannel is in use diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6e8073140a5d..abc63bd1be2b 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -124,6 +124,7 @@ struct rpc_create_args { u32 prognumber; /* overrides program->number */ u32 version; rpc_authflavor_t authflavor; + u32 nconnect; unsigned long flags; char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ @@ -163,6 +164,8 @@ void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); void rpc_task_release_transport(struct rpc_task *); void rpc_task_release_client(struct rpc_task *); +struct rpc_xprt *rpc_task_get_xprt(struct rpc_clnt *clnt, + struct rpc_xprt *xprt); int rpcb_create_local(struct net *); void rpcb_put_local(struct net *); @@ -191,6 +194,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); +unsigned int rpc_num_bc_slots(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1b3751327575..0ee3f7052846 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -30,7 +30,7 @@ #include <linux/ktime.h> #include <linux/spinlock.h> -#define RPC_IOSTATS_VERS "1.0" +#define RPC_IOSTATS_VERS "1.1" struct rpc_iostats { spinlock_t om_lock; @@ -66,6 +66,11 @@ struct rpc_iostats { ktime_t om_queue, /* queued for xmit */ om_rtt, /* RPC RTT */ om_execute; /* RPC execution */ + /* + * The count of operations that complete with tk_status < 0. + * These statuses usually indicate error conditions. 
+ */ + unsigned long om_error_status; } ____cacheline_aligned; struct rpc_task; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index d0e451868f02..baa3ecdb882f 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -126,6 +126,7 @@ struct rpc_task_setup { #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ +#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_TASK_SENT 0x0800 /* message was sent */ @@ -183,8 +184,9 @@ struct rpc_task_setup { #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) struct rpc_timer { - struct timer_list timer; struct list_head list; + unsigned long expires; + struct delayed_work dwork; }; /* diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9ee3970ba59c..8a87d8bcb197 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -164,6 +164,13 @@ xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) return p + XDR_QUADLEN(len); } +static inline void xdr_netobj_dup(struct xdr_netobj *dst, + struct xdr_netobj *src, gfp_t gfp_mask) +{ + dst->data = kmemdup(src->data, src->len, gfp_mask); + dst->len = src->len; +} + /* * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a6d9fce7f20e..13e108bcc9eb 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -158,6 +158,7 @@ struct rpc_xprt_ops { int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); + unsigned int (*bc_num_slots)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); @@ -238,6 +239,7 @@ struct rpc_xprt { /* * Send stuff */ + atomic_long_t queuelen; spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ spinlock_t queue_lock; /* send/receive queue lock */ @@ -250,8 +252,9 @@ struct rpc_xprt { #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ - int bc_alloc_count; /* Total number of preallocs */ - atomic_t bc_free_slots; + unsigned int bc_alloc_max; + unsigned int bc_alloc_count; /* Total number of preallocs */ + atomic_t bc_slot_count; /* Number of allocated slots */ spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated @@ -334,6 +337,9 @@ struct xprt_class { */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); +unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt); +void xprt_reconnect_backoff(struct rpc_xprt *xprt, + unsigned long init_to); void xprt_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index af1257c030d2..c6cce3fbf29d 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -15,6 +15,8 @@ struct rpc_xprt_switch { struct kref xps_kref; unsigned int xps_nxprts; + unsigned int xps_nactive; + 
atomic_long_t xps_queuelen; struct list_head xps_xprt_list; struct net * xps_net; diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index b81d0b3e0799..7638dbe7bc50 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -56,6 +56,7 @@ struct sock_xprt { */ unsigned long sock_state; struct delayed_work connect_worker; + struct work_struct error_worker; struct work_struct recv_worker; struct mutex recv_mutex; struct sockaddr_storage srcaddr; @@ -84,6 +85,10 @@ struct sock_xprt { #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) #define XPRT_SOCK_UPD_TIMEOUT (3) +#define XPRT_SOCK_WAKE_ERROR (4) +#define XPRT_SOCK_WAKE_WRITE (5) +#define XPRT_SOCK_WAKE_PENDING (6) +#define XPRT_SOCK_WAKE_DISCONNECT (7) #endif /* __KERNEL__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f0d262ad7b78..9c0ad1a3a727 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -304,7 +304,7 @@ static inline bool idle_should_enter_s2idle(void) return unlikely(s2idle_state == S2IDLE_STATE_ENTER); } -extern bool pm_suspend_via_s2idle(void); +extern bool pm_suspend_default_s2idle(void); extern void __init pm_states_init(void); extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); extern void s2idle_wake(void); @@ -336,7 +336,7 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } -static inline bool pm_suspend_via_s2idle(void) { return false; } +static inline bool pm_suspend_default_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } @@ -448,6 +448,7 @@ extern bool system_entering_hibernation(void); extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; +int pfn_is_nosave(unsigned long pfn); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} diff --git a/include/linux/swap.h b/include/linux/swap.h index 4bfb5c4ac108..de2c67a33b7e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -148,7 +148,7 @@ struct zone; * We always assume that blocks are of size PAGE_SIZE. */ struct swap_extent { - struct list_head list; + struct rb_node rb_node; pgoff_t start_page; pgoff_t nr_pages; sector_t start_block; @@ -175,8 +175,9 @@ enum { SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ + SWP_VALID = (1 << 13), /* swap is valid to be operated on? */ /* add others here before... 
*/ - SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */ + SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL @@ -247,8 +248,7 @@ struct swap_info_struct { unsigned int cluster_next; /* likely index for next allocation */ unsigned int cluster_nr; /* countdown to next cluster search */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ - struct swap_extent *curr_swap_extent; - struct swap_extent first_swap_extent; + struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ @@ -460,7 +460,7 @@ extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); -extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry); +extern int __swap_count(swp_entry_t entry); extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); @@ -470,6 +470,12 @@ extern int try_to_free_swap(struct page *); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); +extern struct swap_info_struct *get_swap_device(swp_entry_t entry); + +static inline void put_swap_device(struct swap_info_struct *si) +{ + rcu_read_unlock(); +} #else /* CONFIG_SWAP */ @@ -576,7 +582,7 @@ static inline int page_swapcount(struct page *page) return 0; } -static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry) +static inline int __swap_count(swp_entry_t entry) { return 0; } diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 4d961668e5fc..877fd239b6ff 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -6,6 +6,8 @@ #include <linux/bug.h> #include <linux/mm_types.h> +#ifdef CONFIG_MMU + /* * swapcache pages are stored in the swapper_space radix tree. 
We want to * get good packing density in that tree, so the index should be dense in @@ -50,13 +52,11 @@ static inline pgoff_t swp_offset(swp_entry_t entry) return entry.val & SWP_OFFSET_MASK; } -#ifdef CONFIG_MMU /* check whether a pte points to a swap entry */ static inline int is_swap_pte(pte_t pte) { return !pte_none(pte) && !pte_present(pte); } -#endif /* * Convert the arch-dependent pte representation of a swp_entry_t into an @@ -129,12 +129,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return pfn_to_page(swp_offset(entry)); } - -vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, - unsigned long addr, - swp_entry_t entry, - unsigned int flags, - pmd_t *pmdp); #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_device_private_entry(struct page *page, bool write) { @@ -164,15 +158,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return NULL; } - -static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, - unsigned long addr, - swp_entry_t entry, - unsigned int flags, - pmd_t *pmdp) -{ - return VM_FAULT_SIGBUS; -} #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION @@ -375,4 +360,5 @@ static inline int non_swap_entry(swp_entry_t entry) } #endif +#endif /* CONFIG_MMU */ #endif /* _LINUX_SWAPOPS_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2bcef4c70183..88145da7d140 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -68,6 +68,7 @@ struct sigaltstack; struct rseq; union bpf_attr; struct io_uring_params; +struct clone_args; #include <linux/types.h> #include <linux/aio_abi.h> @@ -264,7 +265,7 @@ static inline void addr_limit_user_check(void) if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), "Invalid address limit on user-mode return")) - force_sig(SIGKILL, current); + force_sig(SIGKILL); #ifdef TIF_FSCHECK clear_thread_flag(TIF_FSCHECK); @@ -850,6 +851,9 @@ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, unsigned long); #endif #endif + +asmlinkage long sys_clone3(struct clone_args __user *uargs, size_t size); + asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp); @@ -927,6 +931,7 @@ asmlinkage long sys_clock_adjtime32(clockid_t which_clock, struct old_timex32 __user *tx); asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); +asmlinkage long sys_pidfd_open(pid_t pid, unsigned int flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_process_vm_readv(pid_t pid, @@ -1226,8 +1231,8 @@ asmlinkage long sys_ni_syscall(void); * the ksys_xyzyyz() functions prototyped below. 
*/ -int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type, - unsigned long flags, void __user *data); +int ksys_mount(const char __user *dev_name, const char __user *dir_name, + const char __user *type, unsigned long flags, void __user *data); int ksys_umount(char __user *name, int flags); int ksys_dup(unsigned int fildes); int ksys_chroot(const char __user *filename); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index aadd310769d0..6df477329b76 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -37,6 +37,13 @@ struct ctl_table_root; struct ctl_table_header; struct ctl_dir; +/* Keep the same order as in fs/proc/proc_sysctl.c */ +#define SYSCTL_ZERO ((void *)&sysctl_vals[0]) +#define SYSCTL_ONE ((void *)&sysctl_vals[1]) +#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2]) + +extern const int sysctl_vals[]; + typedef int proc_handler (struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 786816cf4aa5..965236795750 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -268,6 +268,8 @@ int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); +int __must_check sysfs_update_groups(struct kobject *kobj, + const struct attribute_group **groups); int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, @@ -433,6 +435,12 @@ static inline int sysfs_create_groups(struct kobject *kobj, return 0; } +static inline int sysfs_update_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ + return 0; +} + static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9a478a0cd3a2..f3a85a7fb4b1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -58,12 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { - union { - u8 val[TCP_FASTOPEN_COOKIE_MAX]; -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr addr; -#endif - }; + __le64 val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))]; s8 len; bool exp; /* In RFC6994 experimental option format */ }; @@ -245,6 +240,7 @@ struct tcp_sock { syn_smc:1; /* SYN includes SMC */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ + u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ @@ -436,6 +432,7 @@ struct tcp_timewait_sock { u32 tw_last_oow_ack_time; int tw_ts_recent_stamp; + u32 tw_tx_delay; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 15a4ca5d7099..681047f8cc05 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -251,7 +251,7 @@ struct thermal_bind_params { * platform characterization. This value is relative to the * rest of the weights so a cooling device whose weight is * double that of another cooling device is twice as - * effective. See Documentation/thermal/sysfs-api.txt for more + * effective. See Documentation/thermal/sysfs-api.rst for more * information. 
*/ int weight; @@ -259,7 +259,7 @@ struct thermal_bind_params { /* * This is a bit mask that gives the binding relation between this * thermal zone and cdev, for a particular trip point. - * See Documentation/thermal/sysfs-api.txt for more information. + * See Documentation/thermal/sysfs-api.rst for more information. */ int trip_mask; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index a8ab0f143ac4..b27e2ffa96c1 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -113,6 +113,34 @@ static inline ktime_t ktime_get_coarse_clocktai(void) return ktime_get_coarse_with_offset(TK_OFFS_TAI); } +static inline ktime_t ktime_get_coarse(void) +{ + struct timespec64 ts; + + ktime_get_coarse_ts64(&ts); + return timespec64_to_ktime(ts); +} + +static inline u64 ktime_get_coarse_ns(void) +{ + return ktime_to_ns(ktime_get_coarse()); +} + +static inline u64 ktime_get_coarse_real_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_real()); +} + +static inline u64 ktime_get_coarse_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_boottime()); +} + +static inline u64 ktime_get_coarse_clocktai_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_clocktai()); +} + /** * ktime_mono_to_real - Convert monotonic time to clock realtime */ @@ -131,12 +159,12 @@ static inline u64 ktime_get_real_ns(void) return ktime_to_ns(ktime_get_real()); } -static inline u64 ktime_get_boot_ns(void) +static inline u64 ktime_get_boottime_ns(void) { return ktime_to_ns(ktime_get_boottime()); } -static inline u64 ktime_get_tai_ns(void) +static inline u64 ktime_get_clocktai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); } diff --git a/include/linux/timer.h b/include/linux/timer.h index 7b066fd38248..282e4f2a532a 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -36,19 +36,30 @@ struct timer_list { #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) #endif -/* - * A deferrable timer will work normally when the system is busy, but - * will not cause a CPU to come out of idle just to service it; instead, - * the timer will be serviced when the CPU eventually wakes up with a - * subsequent non-deferrable timer. +/** + * @TIMER_DEFERRABLE: A deferrable timer will work normally when the + * system is busy, but will not cause a CPU to come out of idle just + * to service it; instead, the timer will be serviced when the CPU + * eventually wakes up with a subsequent non-deferrable timer. * - * An irqsafe timer is executed with IRQ disabled and it's safe to wait for - * the completion of the running instance from IRQ handlers, for example, - * by calling del_timer_sync(). + * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and + * it's safe to wait for the completion of the running instance from + * IRQ handlers, for example, by calling del_timer_sync(). * * Note: The irq disabled callback execution is a special case for * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! + * + * @TIMER_PINNED: A pinned timer will not be affected by any timer + * placement heuristics (like, NOHZ) and will always expire on the CPU + * on which the timer was enqueued. + * + * Note: Because enqueuing of timers can migrate the timer from one + * CPU to another, pinned timers are not guaranteed to stay on the + * initialy selected CPU. They move to the CPU on which the enqueue + * function is invoked via mod_timer() or add_timer(). If the timer + * should be placed on a particular CPU, then add_timer_on() has to be + * used. 
*/ #define TIMER_CPUMASK 0x0003FFFF #define TIMER_MIGRATING 0x00040000 diff --git a/include/linux/topology.h b/include/linux/topology.h index cb0775e1ee4b..47a3e3c08036 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -184,6 +184,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_physical_package_id #define topology_physical_package_id(cpu) ((void)(cpu), -1) #endif +#ifndef topology_die_id +#define topology_die_id(cpu) ((void)(cpu), -1) +#endif #ifndef topology_core_id #define topology_core_id(cpu) ((void)(cpu), 0) #endif @@ -193,6 +196,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_core_cpumask #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif +#ifndef topology_die_cpumask +#define topology_die_cpumask(cpu) cpumask_of(cpu) +#endif #ifdef CONFIG_SCHED_SMT static inline const struct cpumask *cpu_smt_mask(int cpu) diff --git a/include/linux/torture.h b/include/linux/torture.h index 23d80db426d7..a620118385bb 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -66,7 +66,7 @@ int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. */ bool stutter_wait(const char *title); -int torture_stutter_init(int s); +int torture_stutter_init(int s, int sgap); /* Initialization and cleanup. */ bool torture_init_begin(char *ttype, int v); diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 81519f163211..63238c84dc0b 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -112,4 +112,156 @@ struct tcg_pcr_event2_head { struct tpm_digest digests[]; } __packed; +struct tcg_algorithm_size { + u16 algorithm_id; + u16 algorithm_size; +}; + +struct tcg_algorithm_info { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintn_size; + u32 number_of_algorithms; + struct tcg_algorithm_size digest_sizes[]; +}; + +#ifndef TPM_MEMREMAP +#define TPM_MEMREMAP(start, size) NULL +#endif + +#ifndef TPM_MEMUNMAP +#define TPM_MEMUNMAP(start, size) do{} while(0) +#endif + +/** + * __calc_tpm2_event_size - calculate the size of a TPM2 event log entry + * @event: Pointer to the event whose size should be calculated + * @event_header: Pointer to the initial event containing the digest lengths + * @do_mapping: Whether or not the event needs to be mapped + * + * The TPM2 event log format can contain multiple digests corresponding to + * separate PCR banks, and also contains a variable length of the data that + * was measured. This requires knowledge of how long each digest type is, + * and this information is contained within the first event in the log. + * + * We calculate the length by examining the number of events, and then looking + * at each event in turn to determine how much space is used for events in + * total. Once we've done this we know the offset of the data length field, + * and can calculate the total size of the event. 
+ * + * Return: size of the event on success, <0 on failure + */ + +static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, + struct tcg_pcr_event *event_header, + bool do_mapping) +{ + struct tcg_efi_specid_event_head *efispecid; + struct tcg_event_field *event_field; + void *mapping = NULL; + int mapping_size; + void *marker; + void *marker_start; + u32 halg_size; + size_t size; + u16 halg; + int i; + int j; + + marker = event; + marker_start = marker; + marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type) + + sizeof(event->count); + + /* Map the event header */ + if (do_mapping) { + mapping_size = marker - marker_start; + mapping = TPM_MEMREMAP((unsigned long)marker_start, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker_start; + } + + event = (struct tcg_pcr_event2_head *)mapping; + + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; + + /* Check if event is malformed. */ + if (event->count > efispecid->num_algs) { + size = 0; + goto out; + } + + for (i = 0; i < event->count; i++) { + halg_size = sizeof(event->digests[i].alg_id); + + /* Map the digest's algorithm identifier */ + if (do_mapping) { + TPM_MEMUNMAP(mapping, mapping_size); + mapping_size = halg_size; + mapping = TPM_MEMREMAP((unsigned long)marker, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker; + } + + memcpy(&halg, mapping, halg_size); + marker = marker + halg_size; + + for (j = 0; j < efispecid->num_algs; j++) { + if (halg == efispecid->digest_sizes[j].alg_id) { + marker += + efispecid->digest_sizes[j].digest_size; + break; + } + } + /* Algorithm without known length. Such event is unparseable. */ + if (j == efispecid->num_algs) { + size = 0; + goto out; + } + } + + /* + * Map the event size - we don't read from the event itself, so + * we don't need to map it + */ + if (do_mapping) { + TPM_MEMUNMAP(mapping, mapping_size); + mapping_size += sizeof(event_field->event_size); + mapping = TPM_MEMREMAP((unsigned long)marker, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker; + } + + event_field = (struct tcg_event_field *)mapping; + + marker = marker + sizeof(event_field->event_size) + + event_field->event_size; + size = marker - marker_start; + + if ((event->event_type == 0) && (event_field->event_size == 0)) + size = 0; +out: + if (do_mapping) + TPM_MEMUNMAP(mapping, mapping_size); + return size; +} + #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8a62731673f7..5150436783e8 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -142,6 +142,7 @@ enum print_line_t { enum print_line_t trace_handle_return(struct trace_seq *s); void tracing_generic_entry_update(struct trace_entry *entry, + unsigned short type, unsigned long flags, int pc); struct trace_event_file; @@ -317,6 +318,14 @@ trace_event_name(struct trace_event_call *call) return call->name; } +static inline struct list_head * +trace_get_fields(struct trace_event_call *event_call) +{ + if (!event_call->class->get_fields) + return &event_call->class->fields; + return event_call->class->get_fields(event_call); +} + struct trace_array; struct trace_subsystem_dir; diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 09d678433fc0..36fb3bbed6b2 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -54,13 +54,15 @@ struct linux_binprm; /* * ptrace report for syscall entry and exit looks 
identical. */ -static inline int ptrace_report_syscall(struct pt_regs *regs) +static inline int ptrace_report_syscall(struct pt_regs *regs, + unsigned long message) { int ptrace = current->ptrace; if (!(ptrace & PT_PTRACED)) return 0; + current->ptrace_message = message; ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* @@ -73,6 +75,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) current->exit_code = 0; } + current->ptrace_message = 0; return fatal_signal_pending(current); } @@ -98,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) static inline __must_check int tracehook_report_syscall_entry( struct pt_regs *regs) { - return ptrace_report_syscall(regs); + return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); } /** @@ -123,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) if (step) user_single_step_report(regs); else - ptrace_report_syscall(regs); + ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); } /** @@ -184,6 +187,13 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) if (unlikely(current->task_works)) task_work_run(); +#ifdef CONFIG_KEYS_REQUEST_CACHE + if (unlikely(current->cached_requested_key)) { + key_put(current->cached_requested_key); + current->cached_requested_key = NULL; + } +#endif + mem_cgroup_handle_over_high(); blkcg_maybe_throttle_current(); } diff --git a/include/linux/types.h b/include/linux/types.h index 231114ae38f4..05030f608be3 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -174,7 +174,7 @@ typedef struct { #ifdef CONFIG_64BIT typedef struct { - long counter; + s64 counter; } atomic64_t; #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 2b70130af585..34a038563d97 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -203,7 +203,10 @@ static inline void pagefault_enable(void) /* * Is the pagefault handler disabled? If so, user access methods will not sleep. */ -#define pagefault_disabled() (current->pagefault_disabled != 0) +static inline bool pagefault_disabled(void) +{ + return current->pagefault_disabled != 0; +} /* * The pagefault handler is in general disabled by pagefault_disable() or @@ -240,6 +243,18 @@ extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); /* + * probe_user_read(): safely attempt to read from a location in user space + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +extern long probe_user_read(void *dst, const void __user *src, size_t size); +extern long __probe_user_read(void *dst, const void __user *src, size_t size); + +/* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to * @src: pointer to the data that shall be written @@ -252,6 +267,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count); +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location diff --git a/include/linux/uio.h b/include/linux/uio.h index 2c90a0842ee8..ab5f523bc0df 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -19,9 +19,6 @@ struct kvec { }; enum iter_type { - /* set if ITER_BVEC doesn't hold a bv_page ref */ - ITER_BVEC_FLAG_NO_REF = 2, - /* iter types */ ITER_IOVEC = 4, ITER_KVEC = 8, @@ -56,7 +53,7 @@ struct iov_iter { static inline enum iter_type iov_iter_type(const struct iov_iter *i) { - return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF); + return i->type & ~(READ | WRITE); } static inline bool iter_is_iovec(const struct iov_iter *i) @@ -89,11 +86,6 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) return i->type & (READ | WRITE); } -static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i) -{ - return (i->type & ITER_BVEC_FLAG_NO_REF) != 0; -} - /* * Total number of bytes covered by an iovec. * @@ -275,13 +267,13 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, struct iov_iter *i); -int import_iovec(int type, const struct iovec __user * uvector, +ssize_t import_iovec(int type, const struct iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, struct iovec **iov, struct iov_iter *i); #ifdef CONFIG_COMPAT struct compat_iovec; -int compat_import_iovec(int type, const struct compat_iovec __user * uvector, +ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, struct iovec **iov, struct iov_iter *i); #endif diff --git a/include/linux/unicode.h b/include/linux/unicode.h index aec2c6d800aa..990aa97d8049 100644 --- a/include/linux/unicode.h +++ b/include/linux/unicode.h @@ -17,6 +17,9 @@ int utf8_strncmp(const struct unicode_map *um, int utf8_strncasecmp(const struct unicode_map *um, const struct qstr *s1, const struct qstr *s2); +int utf8_strncasecmp_folded(const struct unicode_map *um, + const struct qstr *cf, + const struct qstr *s1); int utf8_normalize(const struct unicode_map *um, const struct qstr *str, unsigned char *dest, size_t dlen); diff --git a/include/linux/usb.h b/include/linux/usb.h index ae82d9d1112b..83d35d993e8c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -578,6 +578,7 @@ struct usb3_lpm_parameters { * @bus_mA: Current available from the bus * @portnum: parent port number (origin 1) * @level: number of USB hub ancestors + * @devaddr: device address, XHCI: assigned by HW, others: same as devnum * @can_submit: URBs may be submitted * @persist_enabled: USB_PERSIST enabled for this device * @have_langid: whether string_langid is valid @@ -661,6 +662,7 @@ struct usb_device { unsigned short bus_mA; u8 
portnum; u8 level; + u8 devaddr; unsigned can_submit:1; unsigned persist_enabled:1; diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 911e05af671e..edd89b7c8f18 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -61,6 +61,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13) #define CI_HDRC_IMX_IS_HSIC BIT(14) +#define CI_HDRC_PMQOS BIT(15) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 7595056b96c1..fb19141151d8 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -310,7 +310,8 @@ struct usb_gadget_ops { int (*pullup) (struct usb_gadget *, int is_on); int (*ioctl)(struct usb_gadget *, unsigned code, unsigned long param); - void (*get_config_params)(struct usb_dcd_config_params *); + void (*get_config_params)(struct usb_gadget *, + struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index bb57b5af4700..bab27ccc8ff5 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -216,6 +216,9 @@ struct usb_hcd { #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) + /* memory pool for HCs having local memory, or %NULL */ + struct gen_pool *localmem_pool; + /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. @@ -253,7 +256,6 @@ struct hc_driver { int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ -#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ @@ -461,6 +463,8 @@ extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); +int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, + dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 53924f8e840c..6914475bbc86 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -3,6 +3,7 @@ * Renesas USB * * Copyright (C) 2011 Renesas Solutions Corp. + * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is distributed in the hope that it will be useful, @@ -33,17 +34,6 @@ enum { }; /* - * callback functions table for driver - * - * These functions are called from platform for driver. 
- * Callback function's pointer will be set before - * renesas_usbhs_platform_callback :: hardware_init was called - */ -struct renesas_usbhs_driver_callback { - int (*notify_hotplug)(struct platform_device *pdev); -}; - -/* * callback functions for platform * * These functions are called from driver for platform @@ -180,23 +170,20 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - uintptr_t type; u32 enable_gpio; /* * option: */ - u32 has_otg:1; /* for controlling PWEN/EXTLP */ - u32 has_sudmac:1; /* for SUDMAC */ u32 has_usb_dmac:1; /* for USB-DMAC */ + u32 runtime_pwctrl:1; + u32 has_cnen:1; + u32 cfifo_byte_addr:1; /* CFIFO is byte addressable */ #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ + u32 multi_clks:1; + u32 has_new_pipe_configs:1; }; -#define USBHS_TYPE_RCAR_GEN2 1 -#define USBHS_TYPE_RCAR_GEN3 2 -#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 -#define USBHS_TYPE_RZA1 4 - /* * option: * @@ -212,12 +199,6 @@ struct renesas_usbhs_platform_info { struct renesas_usbhs_platform_callback platform_callback; /* - * driver set these callback functions pointer. - * platform can use it on callback functions - */ - struct renesas_usbhs_driver_callback driver_callback; - - /* * option: * * driver use these param for some register @@ -230,12 +211,4 @@ struct renesas_usbhs_platform_info { */ #define renesas_usbhs_get_info(pdev)\ ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) - -#define renesas_usbhs_call_notify_hotplug(pdev) \ - ({ \ - struct renesas_usbhs_driver_callback *dc; \ - dc = &(renesas_usbhs_get_info(pdev)->driver_callback); \ - if (dc && dc->notify_hotplug) \ - dc->notify_hotplug(pdev); \ - }) #endif /* RENESAS_USB_H */ diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h index 43f40685e53c..873ace5b0cf8 100644 --- a/include/linux/usb/typec_mux.h +++ b/include/linux/usb/typec_mux.h @@ -3,54 +3,48 @@ #ifndef __USB_TYPEC_MUX #define __USB_TYPEC_MUX -#include <linux/list.h> #include <linux/usb/typec.h> struct device; +struct typec_mux; +struct typec_switch; +struct fwnode_handle; -/** - * struct typec_switch - USB Type-C cable orientation switch - * @dev: Switch device - * @entry: List entry - * @set: Callback to the driver for setting the orientation - * - * USB Type-C pin flipper switch routing the correct data pairs from the - * connector to the USB controller depending on the orientation of the cable - * plug. - */ -struct typec_switch { - struct device *dev; - struct list_head entry; - - int (*set)(struct typec_switch *sw, enum typec_orientation orientation); -}; +typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw, + enum typec_orientation orientation); -/** - * struct typec_switch - USB Type-C connector pin mux - * @dev: Mux device - * @entry: List entry - * @set: Callback to the driver for setting the state of the mux - * - * Pin Multiplexer/DeMultiplexer switch routing the USB Type-C connector pins to - * different components depending on the requested mode of operation. Used with - * Accessory/Alternate modes. 
- */ -struct typec_mux { - struct device *dev; - struct list_head entry; - - int (*set)(struct typec_mux *mux, int state); +struct typec_switch_desc { + struct fwnode_handle *fwnode; + typec_switch_set_fn_t set; + void *drvdata; }; struct typec_switch *typec_switch_get(struct device *dev); void typec_switch_put(struct typec_switch *sw); -int typec_switch_register(struct typec_switch *sw); +struct typec_switch * +typec_switch_register(struct device *parent, + const struct typec_switch_desc *desc); void typec_switch_unregister(struct typec_switch *sw); +void typec_switch_set_drvdata(struct typec_switch *sw, void *data); +void *typec_switch_get_drvdata(struct typec_switch *sw); + +typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, int state); + +struct typec_mux_desc { + struct fwnode_handle *fwnode; + typec_mux_set_fn_t set; + void *drvdata; +}; + struct typec_mux * typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc); void typec_mux_put(struct typec_mux *mux); -int typec_mux_register(struct typec_mux *mux); +struct typec_mux * +typec_mux_register(struct device *parent, const struct typec_mux_desc *desc); void typec_mux_unregister(struct typec_mux *mux); +void typec_mux_set_drvdata(struct typec_mux *mux, void *data); +void *typec_mux_get_drvdata(struct typec_mux *mux); + #endif /* __USB_TYPEC_MUX */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index d6b74b91096b..fb9f4f799554 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -64,10 +64,20 @@ struct user_namespace { struct ns_common ns; unsigned long flags; +#ifdef CONFIG_KEYS + /* List of joinable keyrings in this namespace. Modification access of + * these pointers is controlled by keyring_sem. Once + * user_keyring_register is set, it won't be changed, so it can be + * accessed directly with READ_ONCE(). + */ + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; +#endif + /* Register of per-UID persistent keyrings for this namespace */ #ifdef CONFIG_PERSISTENT_KEYRINGS struct key *persistent_keyring_register; - struct rw_semaphore persistent_keyring_register_sem; #endif struct work_struct work; #ifdef CONFIG_SYSCTL diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 51e131245379..9b21d0047710 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -72,10 +72,12 @@ extern void vm_unmap_aliases(void); #ifdef CONFIG_MMU extern void __init vmalloc_init(void); +extern unsigned long vmalloc_nr_pages(void); #else static inline void vmalloc_init(void) { } +static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif extern void *vmalloc(unsigned long size); diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 61e6fddfb26f..6d28bc433c1c 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -17,7 +17,7 @@ struct vmpressure { unsigned long tree_scanned; unsigned long tree_reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. */ - struct spinlock sr_lock; + spinlock_t sr_lock; /* The list of vmpressure_event structs. */ struct list_head events; diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 77ac9c7b9483..fefb5292403b 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -62,9 +62,18 @@ enum { /* * A single VMCI device has an upper limit of 128MB on the amount of - * memory that can be used for queue pairs. 
+ * memory that can be used for queue pairs. Since each queue pair + * consists of at least two pages, the memory limit also dictates the + * number of queue pairs a guest can create. */ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) +#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2) + +/* + * There can be at most PAGE_SIZE doorbells since there is one doorbell + * per byte in the doorbell bitmap page. + */ +#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE /* * Queues with pre-mapped data pages must be small, so that we don't pin @@ -430,8 +439,8 @@ enum { struct vmci_queue_header { /* All fields are 64bit and aligned. */ struct vmci_handle handle; /* Identifier. */ - atomic64_t producer_tail; /* Offset in this queue. */ - atomic64_t consumer_head; /* Offset in peer queue. */ + u64 producer_tail; /* Offset in this queue. */ + u64 consumer_head; /* Offset in peer queue. */ }; /* @@ -732,13 +741,9 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data) * prefix will be used, so correctness isn't an issue, but using a * 64bit operation still adds unnecessary overhead. */ -static inline u64 vmci_q_read_pointer(atomic64_t *var) +static inline u64 vmci_q_read_pointer(u64 *var) { -#if defined(CONFIG_X86_32) - return atomic_read((atomic_t *)var); -#else - return atomic64_read(var); -#endif + return READ_ONCE(*(unsigned long *)var); } /* @@ -747,23 +752,17 @@ static inline u64 vmci_q_read_pointer(atomic64_t *var) * never exceeds a 32bit value in this case. On 32bit SMP, using a * locked cmpxchg8b adds unnecessary overhead. */ -static inline void vmci_q_set_pointer(atomic64_t *var, - u64 new_val) +static inline void vmci_q_set_pointer(u64 *var, u64 new_val) { -#if defined(CONFIG_X86_32) - return atomic_set((atomic_t *)var, (u32)new_val); -#else - return atomic64_set(var, new_val); -#endif + /* XXX buggered on big-endian */ + WRITE_ONCE(*(unsigned long *)var, (unsigned long)new_val); } /* * Helper to add a given offset to a head or tail pointer. Wraps the * value of the pointer around the max size of the queue. */ -static inline void vmci_qp_add_pointer(atomic64_t *var, - size_t add, - u64 size) +static inline void vmci_qp_add_pointer(u64 *var, size_t add, u64 size) { u64 new_val = vmci_q_read_pointer(var); @@ -840,8 +839,8 @@ static inline void vmci_q_header_init(struct vmci_queue_header *q_header, const struct vmci_handle handle) { q_header->handle = handle; - atomic64_set(&q_header->producer_tail, 0); - atomic64_set(&q_header->consumer_head, 0); + q_header->producer_tail = 0; + q_header->consumer_head = 0; } /* diff --git a/include/linux/wait.h b/include/linux/wait.h index b6f77cf60dd7..30c515520fb2 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -127,6 +127,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head) } /** + * wq_has_single_sleeper - check if there is only one sleeper + * @wq_head: wait queue head + * + * Returns true of wq_head has only one sleeper on the list. + * + * Please refer to the comment for waitqueue_active. 
+ */ +static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) +{ + return list_is_singular(&wq_head->head); +} + +/** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h deleted file mode 100644 index f6358558f9f5..000000000000 --- a/include/linux/wanrouter.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * wanrouter.h Legacy declarations kept around until X25 is removed - */ - -#ifndef _ROUTER_H -#define _ROUTER_H - -#include <uapi/linux/wanrouter.h> - -#endif /* _ROUTER_H */ diff --git a/include/linux/wmi.h b/include/linux/wmi.h index fcc9d029f67a..8ef7e7faea1e 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -36,7 +36,7 @@ struct wmi_driver { struct device_driver driver; const struct wmi_device_id *id_table; - int (*probe)(struct wmi_device *wdev); + int (*probe)(struct wmi_device *wdev, const void *context); int (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d59525fca4d3..b7c585b5ec1c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -435,10 +435,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, extern void destroy_workqueue(struct workqueue_struct *wq); -struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); -void free_workqueue_attrs(struct workqueue_attrs *attrs); -int apply_workqueue_attrs(struct workqueue_struct *wq, - const struct workqueue_attrs *attrs); int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 738a0c24874f..8945aac31392 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -11,6 +11,7 @@ #include <linux/flex_proportions.h> #include <linux/backing-dev-defs.h> #include <linux/blk_types.h> +#include <linux/blk-cgroup.h> struct bio; @@ -68,6 +69,17 @@ struct writeback_control { unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ + + /* + * When writeback IOs are bounced through async layers, only the + * initial synchronous phase should be accounted towards inode + * cgroup ownership arbitration to avoid confusion. Later stages + * can set the following flag to disable the accounting. 
+ */ + unsigned no_cgroup_owner:1; + + unsigned punt_to_cgroup:1; /* cgrp punting, see __REQ_CGROUP_PUNT */ + #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb; /* wb this writeback is issued under */ struct inode *inode; /* inode being written out */ @@ -84,12 +96,27 @@ struct writeback_control { static inline int wbc_to_write_flags(struct writeback_control *wbc) { + int flags = 0; + + if (wbc->punt_to_cgroup) + flags = REQ_CGROUP_PUNT; + if (wbc->sync_mode == WB_SYNC_ALL) - return REQ_SYNC; + flags |= REQ_SYNC; else if (wbc->for_kupdate || wbc->for_background) - return REQ_BACKGROUND; + flags |= REQ_BACKGROUND; - return 0; + return flags; +} + +static inline struct cgroup_subsys_state * +wbc_blkcg_css(struct writeback_control *wbc) +{ +#ifdef CONFIG_CGROUP_WRITEBACK + if (wbc->wb) + return wbc->wb->blkcg_css; +#endif + return blkcg_root_css; } /* @@ -188,8 +215,8 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, struct inode *inode) __releases(&inode->i_lock); void wbc_detach_inode(struct writeback_control *wbc); -void wbc_account_io(struct writeback_control *wbc, struct page *page, - size_t bytes); +void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, + size_t bytes); void cgroup_writeback_umount(void); /** @@ -291,8 +318,8 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { } -static inline void wbc_account_io(struct writeback_control *wbc, - struct page *page, size_t bytes) +static inline void wbc_account_cgroup_owner(struct writeback_control *wbc, + struct page *page, size_t bytes) { } |
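The hunks above add several small helpers whose intended use is easiest to see in code; the sketches that follow are illustrative only and are not part of the patch. First, the module_sdw_driver() macro added to sdw_type.h replaces a boilerplate module_init()/module_exit() pair. A minimal slave driver skeleton might look like the code below; the driver name and the struct sdw_driver members used (.driver, .probe, .remove) are assumptions based on the existing SoundWire API rather than anything introduced here.

#include <linux/module.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>

/* Hypothetical callbacks; the signatures follow the existing
 * struct sdw_driver definition, which this diff does not change. */
static int my_codec_probe(struct sdw_slave *slave,
			  const struct sdw_device_id *id)
{
	return 0;
}

static int my_codec_remove(struct sdw_slave *slave)
{
	return 0;
}

static struct sdw_driver my_codec_driver = {
	.driver = {
		.name = "my-sdw-codec",		/* hypothetical */
	},
	.probe	= my_codec_probe,
	.remove	= my_codec_remove,
	/* .id_table would normally list the supported SoundWire IDs */
};

/* Expands to module_driver(..., sdw_register_driver, sdw_unregister_driver),
 * replacing explicit module_init()/module_exit() definitions. */
module_sdw_driver(my_codec_driver);

MODULE_LICENSE("GPL");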
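The spi.h changes add per-transfer chip-select delays (cs_change_delay plus a unit selector), report back effective_speed_hz, and add spi_is_bpw_supported() so a client can test a word length before using it. A rough usage sketch, where the 16-bit word size and the 10 microsecond gap are arbitrary example values and spi_message_init()/spi_message_add_tail()/spi_sync() are pre-existing SPI core calls:

#include <linux/spi/spi.h>

static int my_spi_send(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf			= buf,
		.len			= len,
		.cs_change		= 1,
		.cs_change_delay	= 10,	/* new field */
		.cs_change_delay_unit	= SPI_DELAY_UNIT_USECS,
	};
	struct spi_message msg;
	int ret;

	/* Prefer 16-bit words when the controller supports them. */
	if (spi_is_bpw_supported(spi, 16))
		xfer.bits_per_word = 16;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	ret = spi_sync(spi, &msg);

	/* xfer.effective_speed_hz now holds the SCK rate actually used,
	 * or 0 if the controller driver does not fill it in. */
	return ret;
}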
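The __DEFINE_SRCU() rework in srcutree.h only changes how the srcu_struct is instantiated when built as a module (recorded in the ___srcu_struct_ptrs section) versus built in; callers are unaffected. For context, a minimal reader/updater pattern using long-standing SRCU primitives that are not part of this patch:

#include <linux/srcu.h>
#include <linux/rcupdate.h>

struct foo {			/* hypothetical payload */
	int value;
};

DEFINE_STATIC_SRCU(my_srcu);	/* same macro for modules and built-in code */
static struct foo __rcu *my_foo;

static int read_value(void)
{
	struct foo *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_foo, &my_srcu);
	if (p)
		val = p->value;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

static void publish(struct foo *newp)
{
	rcu_assign_pointer(my_foo, newp);
	/* Wait for readers that may still see the previous pointer. */
	synchronize_srcu(&my_srcu);
}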
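probe_user_read(), declared in uaccess.h alongside the existing probe_kernel_read(), reads user memory and turns any fault into -EFAULT instead of letting it propagate to the caller. A trivial wrapper as a sketch; peek_user_word() is a made-up name:

#include <linux/uaccess.h>
#include <linux/types.h>

/* Copy a u32 from user space in a context where faulting must not be
 * fatal (tracing, sampling, ...). Returns 0 or -EFAULT. */
static long peek_user_word(const void __user *uaddr, u32 *val)
{
	return probe_user_read(val, uaddr, sizeof(*val));
}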
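wq_has_single_sleeper() in wait.h is a companion to waitqueue_active()/wq_has_sleeper() and inherits the same memory-ordering caveat. A sketch of the sort of decision it enables; the wait queue and the wake-up policy here are invented for illustration:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

static void my_signal_completion(void)
{
	/* As with waitqueue_active(), the caller must order its own state
	 * update against this unlocked check (hence the barrier). */
	smp_mb();

	if (wq_has_single_sleeper(&my_waitq))
		wake_up(&my_waitq);		/* exactly one waiter */
	else if (waitqueue_active(&my_waitq))
		wake_up_all(&my_waitq);
}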
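Finally, the writeback.h hunk turns wbc_to_write_flags() from returning a single value into OR-ing flags together, so the new punt_to_cgroup bit can be combined with REQ_SYNC or REQ_BACKGROUND. A small sketch of the resulting behaviour, using only symbols visible in this diff plus the pre-existing WB_SYNC_ALL and REQ_* definitions:

#include <linux/writeback.h>

static unsigned int my_wbc_flags_example(void)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.punt_to_cgroup	= 1,		/* new bitfield */
	};

	/* With this patch the helper returns REQ_CGROUP_PUNT | REQ_SYNC;
	 * previously only one REQ_* value could be reported at a time. */
	return wbc_to_write_flags(&wbc);
}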