Diffstat (limited to 'include')
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi.h4
-rw-r--r--include/acpi/acpi_bus.h61
-rw-r--r--include/acpi/acpi_numa.h1
-rw-r--r--include/acpi/acpixf.h15
-rw-r--r--include/acpi/acrestyp.h7
-rw-r--r--include/acpi/actbl.h1
-rw-r--r--include/acpi/actbl2.h30
-rw-r--r--include/acpi/actypes.h11
-rw-r--r--include/acpi/apei.h8
-rw-r--r--include/acpi/ghes.h5
-rw-r--r--include/acpi/platform/acenv.h9
-rw-r--r--include/acpi/platform/acgcc.h4
-rw-r--r--include/acpi/platform/aclinux.h5
-rw-r--r--include/asm-generic/atomic64.h2
-rw-r--r--include/asm-generic/early_ioremap.h2
-rw-r--r--include/asm-generic/futex.h50
-rw-r--r--include/asm-generic/io.h27
-rw-r--r--include/asm-generic/pgtable.h12
-rw-r--r--include/asm-generic/qspinlock.h14
-rw-r--r--include/asm-generic/sections.h4
-rw-r--r--include/asm-generic/tlb.h7
-rw-r--r--include/asm-generic/topology.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h73
-rw-r--r--include/crypto/algapi.h23
-rw-r--r--include/crypto/if_alg.h170
-rw-r--r--include/crypto/internal/akcipher.h6
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/kpp.h10
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h39
-rw-r--r--include/drm/drmP.h161
-rw-r--r--include/drm/drm_atomic.h132
-rw-r--r--include/drm/drm_atomic_helper.h27
-rw-r--r--include/drm/drm_bridge.h3
-rw-r--r--include/drm/drm_connector.h42
-rw-r--r--include/drm/drm_crtc.h17
-rw-r--r--include/drm/drm_device.h190
-rw-r--r--include/drm/drm_dp_mst_helper.h10
-rw-r--r--include/drm/drm_drv.h94
-rw-r--r--include/drm/drm_edid.h11
-rw-r--r--include/drm/drm_fb_cma_helper.h4
-rw-r--r--include/drm/drm_fb_helper.h74
-rw-r--r--include/drm/drm_framebuffer.h7
-rw-r--r--include/drm/drm_gem.h17
-rw-r--r--include/drm/drm_gem_cma_helper.h5
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h37
-rw-r--r--include/drm/drm_mode_config.h6
-rw-r--r--include/drm/drm_modes.h11
-rw-r--r--include/drm/drm_modeset_helper_vtables.h125
-rw-r--r--include/drm/drm_pci.h11
-rw-r--r--include/drm/drm_plane.h28
-rw-r--r--include/drm/drm_property.h2
-rw-r--r--include/drm/drm_scdc_helper.h25
-rw-r--r--include/drm/drm_simple_kms_helper.h1
-rw-r--r--include/drm/drm_syncobj.h57
-rw-r--r--include/drm/drm_vblank.h3
-rw-r--r--include/drm/tinydrm/mipi-dbi.h6
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h2
-rw-r--r--include/drm/tinydrm/tinydrm.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h22
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h2
-rw-r--r--include/dt-bindings/pinctrl/samsung.h3
-rw-r--r--include/keys/rxrpc-type.h23
-rw-r--r--include/kvm/arm_pmu.h2
-rw-r--r--include/linux/acpi.h100
-rw-r--r--include/linux/acpi_iort.h5
-rw-r--r--include/linux/arch_topology.h4
-rw-r--r--include/linux/ata.h10
-rw-r--r--include/linux/atomic.h3
-rw-r--r--include/linux/avf/virtchnl.h9
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h8
-rw-r--r--include/linux/bitrev.h19
-rw-r--r--include/linux/blk-mq-rdma.h10
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf.h93
-rw-r--r--include/linux/bpf_types.h7
-rw-r--r--include/linux/bpf_verifier.h77
-rw-r--r--include/linux/bsg-lib.h2
-rw-r--r--include/linux/ccp.h11
-rw-r--r--include/linux/cdev.h2
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/ceph/osdmap.h2
-rw-r--r--include/linux/ceph/rados.h4
-rw-r--r--include/linux/compat.h18
-rw-r--r--include/linux/compiler-gcc.h26
-rw-r--r--include/linux/compiler.h43
-rw-r--r--include/linux/completion.h47
-rw-r--r--include/linux/coresight-pmu.h6
-rw-r--r--include/linux/cpufreq.h38
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h21
-rw-r--r--include/linux/cpuset.h25
-rw-r--r--include/linux/cred.h4
-rw-r--r--include/linux/crush/crush.h2
-rw-r--r--include/linux/dax.h46
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/debugfs.h16
-rw-r--r--include/linux/devfreq.h13
-rw-r--r--include/linux/device-mapper.h41
-rw-r--r--include/linux/device.h37
-rw-r--r--include/linux/devpts_fs.h10
-rw-r--r--include/linux/dma-fence.h21
-rw-r--r--include/linux/dma-mapping.h53
-rw-r--r--include/linux/edac.h1
-rw-r--r--include/linux/eeprom_93xx46.h3
-rw-r--r--include/linux/efi.h31
-rw-r--r--include/linux/errseq.h14
-rw-r--r--include/linux/ethtool.h15
-rw-r--r--include/linux/extcon.h130
-rw-r--r--include/linux/filter.h17
-rw-r--r--include/linux/fmc.h39
-rw-r--r--include/linux/fpga/fpga-mgr.h4
-rw-r--r--include/linux/fs.h67
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/fscache.h9
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/futex.h7
-rw-r--r--include/linux/fwnode.h56
-rw-r--r--include/linux/genalloc.h5
-rw-r--r--include/linux/gpio/driver.h24
-rw-r--r--include/linux/gpio/machine.h3
-rw-r--r--include/linux/hid.h24
-rw-r--r--include/linux/hyperv.h88
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/idr.h69
-rw-r--r--include/linux/igmp.h3
-rw-r--r--include/linux/iio/common/st_sensors.h19
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h10
-rw-r--r--include/linux/iio/iio.h2
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h14
-rw-r--r--include/linux/iio/trigger.h4
-rw-r--r--include/linux/inet_diag.h7
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/interrupt.h14
-rw-r--r--include/linux/io.h2
-rw-r--r--include/linux/iommu.h12
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/ipv6.h16
-rw-r--r--include/linux/irq.h14
-rw-r--r--include/linux/irq_sim.h44
-rw-r--r--include/linux/irqchip/arm-gic-common.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h84
-rw-r--r--include/linux/irqchip/arm-gic-v4.h105
-rw-r--r--include/linux/irqdomain.h7
-rw-r--r--include/linux/irqflags.h24
-rw-r--r--include/linux/jhash.h29
-rw-r--r--include/linux/jump_label.h33
-rw-r--r--include/linux/kasan-checks.h10
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/kexec.h8
-rw-r--r--include/linux/key-type.h4
-rw-r--r--include/linux/kmod.h2
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/kthread.h2
-rw-r--r--include/linux/kvm_host.h7
-rw-r--r--include/linux/lguest.h73
-rw-r--r--include/linux/lguest_launcher.h44
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/llist.h21
-rw-r--r--include/linux/lockdep.h165
-rw-r--r--include/linux/lsm_hooks.h4
-rw-r--r--include/linux/mcb.h2
-rw-r--r--include/linux/mdio-mux.h9
-rw-r--r--include/linux/mem_encrypt.h48
-rw-r--r--include/linux/memblock.h6
-rw-r--r--include/linux/memcontrol.h62
-rw-r--r--include/linux/memory_hotplug.h2
-rw-r--r--include/linux/mfd/cros_ec_commands.h75
-rw-r--r--include/linux/mfd/da9052/da9052.h6
-rw-r--r--include/linux/mfd/da9052/reg.h11
-rw-r--r--include/linux/mfd/ds1wm.h29
-rw-r--r--include/linux/mfd/rn5t618.h6
-rw-r--r--include/linux/mlx4/device.h39
-rw-r--r--include/linux/mlx5/device.h15
-rw-r--r--include/linux/mlx5/driver.h42
-rw-r--r--include/linux/mlx5/mlx5_ifc.h170
-rw-r--r--include/linux/mlx5/qp.h4
-rw-r--r--include/linux/mlx5/srq.h5
-rw-r--r--include/linux/mlx5/vport.h3
-rw-r--r--include/linux/mm.h15
-rw-r--r--include/linux/mm_inline.h6
-rw-r--r--include/linux/mm_types.h114
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmu_notifier.h25
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/msg.h2
-rw-r--r--include/linux/msi.h1
-rw-r--r--include/linux/mtd/nand.h6
-rw-r--r--include/linux/mux/consumer.h2
-rw-r--r--include/linux/net.h14
-rw-r--r--include/linux/netdev_features.h6
-rw-r--r--include/linux/netdevice.h71
-rw-r--r--include/linux/netfilter.h54
-rw-r--r--include/linux/netfilter/xt_hashlimit.h3
-rw-r--r--include/linux/netfilter_ingress.h4
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nmi.h8
-rw-r--r--include/linux/nvme-fc-driver.h7
-rw-r--r--include/linux/nvme-fc.h19
-rw-r--r--include/linux/nvme.h18
-rw-r--r--include/linux/nvmem-consumer.h10
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/oom.h22
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pagemap.h14
-rw-r--r--include/linux/pagevec.h12
-rw-r--r--include/linux/path.h2
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/percpu.h20
-rw-r--r--include/linux/perf/arm_pmu.h4
-rw-r--r--include/linux/perf_event.h31
-rw-r--r--include/linux/phy.h23
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/phylink.h148
-rw-r--r--include/linux/pid.h4
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/pinctrl/machine.h4
-rw-r--r--include/linux/pinctrl/pinconf-generic.h6
-rw-r--r--include/linux/platform_data/hsmmc-omap.h10
-rw-r--r--include/linux/platform_data/mdio-bcm-unimac.h13
-rw-r--r--include/linux/platform_data/omap_drm.h53
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_data/x86/apple.h13
-rw-r--r--include/linux/pm.h4
-rw-r--r--include/linux/pm_domain.h3
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/property.h67
-rw-r--r--include/linux/ptp_clock_kernel.h20
-rw-r--r--include/linux/ptr_ring.h9
-rw-r--r--include/linux/qed/qed_eth_if.h1
-rw-r--r--include/linux/qed/qed_if.h37
-rw-r--r--include/linux/radix-tree.h21
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/rcutiny.h8
-rw-r--r--include/linux/refcount.h4
-rw-r--r--include/linux/regulator/mt6380-regulator.h32
-rw-r--r--include/linux/reservation.h3
-rw-r--r--include/linux/rwsem-spinlock.h1
-rw-r--r--include/linux/rwsem.h1
-rw-r--r--include/linux/sched.h102
-rw-r--r--include/linux/sched/debug.h4
-rw-r--r--include/linux/sched/mm.h14
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/sched/task.h1
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/sched/user.h3
-rw-r--r--include/linux/sctp.h171
-rw-r--r--include/linux/seg6_local.h6
-rw-r--r--include/linux/sem.h2
-rw-r--r--include/linux/serial_8250.h7
-rw-r--r--include/linux/serial_core.h10
-rw-r--r--include/linux/sfp.h434
-rw-r--r--include/linux/shm.h19
-rw-r--r--include/linux/shmem_fs.h6
-rw-r--r--include/linux/shrinker.h7
-rw-r--r--include/linux/skb_array.h3
-rw-r--r--include/linux/skbuff.h242
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/smp.h8
-rw-r--r--include/linux/soc/ti/knav_dma.h2
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/spinlock.h72
-rw-r--r--include/linux/spinlock_up.h6
-rw-r--r--include/linux/srcutiny.h13
-rw-r--r--include/linux/srcutree.h3
-rw-r--r--include/linux/suspend.h48
-rw-r--r--include/linux/swait.h55
-rw-r--r--include/linux/swap.h78
-rw-r--r--include/linux/swiotlb.h1
-rw-r--r--include/linux/sync_file.h3
-rw-r--r--include/linux/syscalls.h41
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/tcp.h11
-rw-r--r--include/linux/thread_info.h4
-rw-r--r--include/linux/time.h15
-rw-r--r--include/linux/tnum.h81
-rw-r--r--include/linux/trace_events.h6
-rw-r--r--include/linux/tty.h28
-rw-r--r--include/linux/tty_driver.h6
-rw-r--r--include/linux/tty_flip.h3
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/usb/audio-v2.h14
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/phy.h54
-rw-r--r--include/linux/user_namespace.h2
-rw-r--r--include/linux/utsname.h2
-rw-r--r--include/linux/uuid.h14
-rw-r--r--include/linux/vfio.h4
-rw-r--r--include/linux/virtio_net.h5
-rw-r--r--include/linux/vm_event_item.h6
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/w1.h4
-rw-r--r--include/linux/wait.h45
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/media/cec-notifier.h15
-rw-r--r--include/media/davinci/dm644x_ccdc.h12
-rw-r--r--include/media/davinci/vpfe_capture.h10
-rw-r--r--include/media/vsp1.h12
-rw-r--r--include/net/act_api.h76
-rw-r--r--include/net/addrconf.h10
-rw-r--r--include/net/af_rxrpc.h21
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bonding.h5
-rw-r--r--include/net/busy_poll.h12
-rw-r--r--include/net/devlink.h19
-rw-r--r--include/net/dsa.h53
-rw-r--r--include/net/dst.h3
-rw-r--r--include/net/erspan.h61
-rw-r--r--include/net/fib_notifier.h46
-rw-r--r--include/net/fib_rules.h9
-rw-r--r--include/net/flow.h35
-rw-r--r--include/net/flow_dissector.h8
-rw-r--r--include/net/flowcache.h25
-rw-r--r--include/net/inet6_hashtables.h22
-rw-r--r--include/net/inet_frag.h35
-rw-r--r--include/net/inet_hashtables.h31
-rw-r--r--include/net/inetpeer.h11
-rw-r--r--include/net/ip.h23
-rw-r--r--include/net/ip6_fib.h85
-rw-r--r--include/net/ip6_route.h13
-rw-r--r--include/net/ip_fib.h67
-rw-r--r--include/net/ip_tunnels.h7
-rw-r--r--include/net/irda/af_irda.h87
-rw-r--r--include/net/irda/crc.h29
-rw-r--r--include/net/irda/discovery.h95
-rw-r--r--include/net/irda/ircomm_core.h106
-rw-r--r--include/net/irda/ircomm_event.h83
-rw-r--r--include/net/irda/ircomm_lmp.h36
-rw-r--r--include/net/irda/ircomm_param.h147
-rw-r--r--include/net/irda/ircomm_ttp.h37
-rw-r--r--include/net/irda/ircomm_tty.h121
-rw-r--r--include/net/irda/ircomm_tty_attach.h92
-rw-r--r--include/net/irda/irda.h115
-rw-r--r--include/net/irda/irda_device.h285
-rw-r--r--include/net/irda/iriap.h108
-rw-r--r--include/net/irda/iriap_event.h85
-rw-r--r--include/net/irda/irias_object.h108
-rw-r--r--include/net/irda/irlan_client.h42
-rw-r--r--include/net/irda/irlan_common.h230
-rw-r--r--include/net/irda/irlan_eth.h32
-rw-r--r--include/net/irda/irlan_event.h81
-rw-r--r--include/net/irda/irlan_filter.h35
-rw-r--r--include/net/irda/irlan_provider.h52
-rw-r--r--include/net/irda/irlap.h311
-rw-r--r--include/net/irda/irlap_event.h129
-rw-r--r--include/net/irda/irlap_frame.h167
-rw-r--r--include/net/irda/irlmp.h295
-rw-r--r--include/net/irda/irlmp_event.h98
-rw-r--r--include/net/irda/irlmp_frame.h62
-rw-r--r--include/net/irda/irmod.h109
-rw-r--r--include/net/irda/irqueue.h96
-rw-r--r--include/net/irda/irttp.h210
-rw-r--r--include/net/irda/parameters.h100
-rw-r--r--include/net/irda/qos.h101
-rw-r--r--include/net/irda/timer.h105
-rw-r--r--include/net/irda/wrapper.h58
-rw-r--r--include/net/mac80211.h15
-rw-r--r--include/net/ncsi.h12
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h11
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h5
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h45
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h37
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h4
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h45
-rw-r--r--include/net/netfilter/nf_tables_core.h2
-rw-r--r--include/net/netlink.h21
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/netns/xfrm.h11
-rw-r--r--include/net/nsh.h307
-rw-r--r--include/net/pkt_cls.h145
-rw-r--r--include/net/pkt_sched.h14
-rw-r--r--include/net/raw.h2
-rw-r--r--include/net/rawv6.h2
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rtnetlink.h9
-rw-r--r--include/net/sch_generic.h29
-rw-r--r--include/net/sctp/command.h70
-rw-r--r--include/net/sctp/constants.h89
-rw-r--r--include/net/sctp/sctp.h22
-rw-r--r--include/net/sctp/sm.h205
-rw-r--r--include/net/sctp/structs.h74
-rw-r--r--include/net/seg6.h5
-rw-r--r--include/net/sock.h23
-rw-r--r--include/net/strparser.h121
-rw-r--r--include/net/switchdev.h87
-rw-r--r--include/net/tc_act/tc_gact.h20
-rw-r--r--include/net/tcp.h71
-rw-r--r--include/net/tso.h2
-rw-r--r--include/net/tun_proto.h49
-rw-r--r--include/net/udp.h45
-rw-r--r--include/net/udp_tunnel.h8
-rw-r--r--include/net/vxlan.h6
-rw-r--r--include/net/xfrm.h42
-rw-r--r--include/rdma/ib_addr.h17
-rw-r--r--include/rdma/ib_hdrs.h84
-rw-r--r--include/rdma/ib_marshall.h6
-rw-r--r--include/rdma/ib_verbs.h181
-rw-r--r--include/rdma/opa_addr.h42
-rw-r--r--include/rdma/opa_vnic.h3
-rw-r--r--include/rdma/rdma_netlink.h58
-rw-r--r--include/rdma/rdma_vt.h28
-rw-r--r--include/rdma/rdmavt_mr.h3
-rw-r--r--include/rdma/rdmavt_qp.h47
-rw-r--r--include/rdma/uverbs_ioctl.h438
-rw-r--r--include/rdma/uverbs_std_types.h58
-rw-r--r--include/rdma/uverbs_types.h39
-rw-r--r--include/rxrpc/packet.h235
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/sound/omap-hdmi-audio.h2
-rw-r--r--include/sound/soc.h6
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/trace/events/bridge.h129
-rw-r--r--include/trace/events/ext4.h35
-rw-r--r--include/trace/events/fs_dax.h2
-rw-r--r--include/trace/events/mmflags.h8
-rw-r--r--include/trace/events/qdisc.h50
-rw-r--r--include/trace/events/rcu.h7
-rw-r--r--include/trace/events/xdp.h118
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h34
-rw-r--r--include/uapi/asm-generic/ioctls.h2
-rw-r--r--include/uapi/asm-generic/mman-common.h14
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/drm/armada_drm.h22
-rw-r--r--include/uapi/drm/drm.h22
-rw-r--r--include/uapi/drm/drm_fourcc.h31
-rw-r--r--include/uapi/drm/drm_mode.h50
-rw-r--r--include/uapi/drm/i915_drm.h51
-rw-r--r--include/uapi/drm/msm_drm.h6
-rw-r--r--include/uapi/drm/qxl_drm.h6
-rw-r--r--include/uapi/drm/vc4_drm.h22
-rw-r--r--include/uapi/drm/vmwgfx_drm.h11
-rw-r--r--include/uapi/linux/aio_abi.h21
-rw-r--r--include/uapi/linux/android/binder.h16
-rw-r--r--include/uapi/linux/bpf.h74
-rw-r--r--include/uapi/linux/devlink.h18
-rw-r--r--include/uapi/linux/dlm_netlink.h1
-rw-r--r--include/uapi/linux/errqueue.h3
-rw-r--r--include/uapi/linux/ethtool.h48
-rw-r--r--include/uapi/linux/fs.h28
-rw-r--r--include/uapi/linux/fsmap.h2
-rw-r--r--include/uapi/linux/if_arp.h1
-rw-r--r--include/uapi/linux/if_ether.h6
-rw-r--r--include/uapi/linux/if_tunnel.h1
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h37
-rw-r--r--include/uapi/linux/loop.h3
-rw-r--r--include/uapi/linux/lwtunnel.h1
-rw-r--r--include/uapi/linux/membarrier.h23
-rw-r--r--include/uapi/linux/memfd.h24
-rw-r--r--include/uapi/linux/mman.h22
-rw-r--r--include/uapi/linux/ndctl.h37
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h20
-rw-r--r--include/uapi/linux/netfilter/xt_hashlimit.h36
-rw-r--r--include/uapi/linux/netlink.h20
-rw-r--r--include/uapi/linux/perf_event.h61
-rw-r--r--include/uapi/linux/rtnetlink.h23
-rw-r--r--include/uapi/linux/rxrpc.h (renamed from include/linux/rxrpc.h)57
-rw-r--r--include/uapi/linux/seg6_iptunnel.h18
-rw-r--r--include/uapi/linux/seg6_local.h68
-rw-r--r--include/uapi/linux/serial_core.h14
-rw-r--r--include/uapi/linux/shm.h31
-rw-r--r--include/uapi/linux/snmp.h7
-rw-r--r--include/uapi/linux/tcp.h17
-rw-r--r--include/uapi/linux/usb/audio.h6
-rw-r--r--include/uapi/linux/usb/charger.h31
-rw-r--r--include/uapi/linux/userfaultfd.h16
-rw-r--r--include/uapi/linux/virtio_ring.h4
-rw-r--r--include/uapi/linux/xfrm.h1
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h84
-rw-r--r--include/uapi/rdma/ib_user_verbs.h19
-rw-r--r--include/uapi/rdma/mlx4-abi.h52
-rw-r--r--include/uapi/rdma/mlx5-abi.h23
-rw-r--r--include/uapi/rdma/qedr-abi.h3
-rw-r--r--include/uapi/rdma/rdma_netlink.h84
-rw-r--r--include/uapi/rdma/rdma_user_ioctl.h33
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h6
-rw-r--r--include/xen/balloon.h8
494 files changed, 9186 insertions, 7362 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index b421584033a5..d8dd3bf51ca7 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -54,6 +54,7 @@
#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
+#define METHOD_NAME__DMA "_DMA"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index ca036620703c..0887d7cbb7e1 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -58,10 +58,10 @@
#include <acpi/actypes.h> /* ACPICA data types and structures */
#include <acpi/acexcep.h> /* ACPICA exceptions */
#include <acpi/actbl.h> /* ACPI table definitions */
-#include <acpi/acoutput.h> /* Error output and Debug macros */
#include <acpi/acrestyp.h> /* Resource Descriptor structs */
+#include <acpi/platform/acenvex.h> /* Extra environment-specific items */
+#include <acpi/acoutput.h> /* Error output and Debug macros */
#include <acpi/acpiosxf.h> /* OSL interfaces (ACPICA-to-OS) */
#include <acpi/acpixf.h> /* ACPI core subsystem external interfaces */
-#include <acpi/platform/acenvex.h> /* Extra environment-specific items */
#endif /* __ACPI_H__ */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 68bc6be447fd..dedf9d789166 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -316,7 +316,6 @@ struct acpi_device_perf {
struct acpi_device_wakeup_flags {
u8 valid:1; /* Can successfully enable wakeup? */
u8 notifier_present:1; /* Wake-up notify handler has been installed */
- u8 enabled:1; /* Enabled for wakeup */
};
struct acpi_device_wakeup_context {
@@ -333,6 +332,7 @@ struct acpi_device_wakeup {
struct acpi_device_wakeup_context context;
struct wakeup_source *ws;
int prepare_count;
+ int enable_count;
};
struct acpi_device_physical_node {
@@ -395,35 +395,55 @@ struct acpi_data_node {
struct completion kobj_done;
};
-static inline bool is_acpi_node(struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
- || fwnode->type == FWNODE_ACPI_DATA);
-}
+extern const struct fwnode_operations acpi_device_fwnode_ops;
+extern const struct fwnode_operations acpi_data_fwnode_ops;
+extern const struct fwnode_operations acpi_static_fwnode_ops;
-static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
+ return !IS_ERR_OR_NULL(fwnode) &&
+ (fwnode->ops == &acpi_device_fwnode_ops
+ || fwnode->ops == &acpi_data_fwnode_ops);
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
- return is_acpi_device_node(fwnode) ?
- container_of(fwnode, struct acpi_device, fwnode) : NULL;
+ return !IS_ERR_OR_NULL(fwnode) &&
+ fwnode->ops == &acpi_device_fwnode_ops;
}
-static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+#define to_acpi_device_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_acpi_device_node_fwnode = __fwnode; \
+ \
+ is_acpi_device_node(__to_acpi_device_node_fwnode) ? \
+ container_of(__to_acpi_device_node_fwnode, \
+ struct acpi_device, fwnode) : \
+ NULL; \
+ })
+
+static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_ACPI_DATA;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+#define to_acpi_data_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \
+ \
+ is_acpi_data_node(__to_acpi_data_node_fwnode) ? \
+ container_of(__to_acpi_data_node_fwnode, \
+ struct acpi_data_node, fwnode) : \
+ NULL; \
+ })
+
+static inline bool is_acpi_static_node(const struct fwnode_handle *fwnode)
{
- return is_acpi_data_node(fwnode) ?
- container_of(fwnode, struct acpi_data_node, fwnode) : NULL;
+ return !IS_ERR_OR_NULL(fwnode) &&
+ fwnode->ops == &acpi_static_fwnode_ops;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return is_acpi_data_node(fwnode) ?
@@ -578,6 +598,8 @@ struct acpi_pci_root {
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+ u64 *size);
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
void acpi_dma_deconfigure(struct device *dev);
@@ -606,6 +628,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
bool acpi_pm_device_can_wakeup(struct device *dev);
int acpi_pm_device_sleep_state(struct device *, int *, int);
int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
+int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
#else
static inline void acpi_pm_wakeup_event(struct device *dev)
{
@@ -636,6 +659,10 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
{
return -ENODEV;
}
+static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
#endif
#ifdef CONFIG_ACPI_SLEEP
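For reference, a minimal usage sketch of the fwnode-ops based helpers above (the caller and the dev_fwnode() lookup are illustrative, not part of this patch):

	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct acpi_device *adev = to_acpi_device_node(fwnode);

	if (adev)	/* NULL unless fwnode->ops == &acpi_device_fwnode_ops */
		dev_info(dev, "described by an ACPI device node\n");
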
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index d4b72944ccda..1e3a74f94131 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -3,6 +3,7 @@
#ifdef CONFIG_ACPI_NUMA
#include <linux/kernel.h>
+#include <linux/numa.h>
/* Proximity bitmap length */
#if MAX_NUMNODES > 256
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index a59c44c3edd8..53c5e2f7bcec 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170531
+#define ACPI_CA_VERSION 0x20170728
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -160,13 +160,14 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
/*
- * Whether or not to verify the table checksum before installation. Set
- * this to TRUE to verify the table checksum before install it to the table
- * manager. Note that enabling this option causes errors to happen in some
- * OSPMs during early initialization stages. Default behavior is to do such
- * verification.
+ * Whether or not to validate (map) an entire table to verify
+ * checksum/duplication in early stage before install. Set this to TRUE to
+ * allow early table validation before install it to the table manager.
+ * Note that enabling this option causes errors to happen in some OSPMs
+ * during early initialization stages. Default behavior is to allow such
+ * validation.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_verify_table_checksum, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_table_validation, TRUE);
/*
* Optionally enable output from the AML Debug Object.
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 4f7f39a02820..343dbdcef20c 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -377,13 +377,6 @@ struct acpi_resource_generic_register {
u64 address;
};
-/* Generic Address Space Access Sizes */
-#define ACPI_ACCESS_SIZE_UNDEFINED 0
-#define ACPI_ACCESS_SIZE_BYTE 1
-#define ACPI_ACCESS_SIZE_WORD 2
-#define ACPI_ACCESS_SIZE_DWORD 3
-#define ACPI_ACCESS_SIZE_QWORD 4
-
struct acpi_resource_gpio {
u8 revision_id;
u8 connection_type;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bdc55c0da19c..89509b86cb54 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -394,6 +394,7 @@ struct acpi_table_desc {
#define ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL (1) /* Physical address, internally mapped */
#define ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL (2) /* Virtual address, internally allocated */
#define ACPI_TABLE_ORIGIN_MASK (3)
+#define ACPI_TABLE_IS_VERIFIED (4)
#define ACPI_TABLE_IS_LOADED (8)
/*
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 707dda74c272..686b6f8c09dc 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -76,6 +76,7 @@
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_MTMR "MTMR" /* MID Timer table */
+#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
@@ -664,7 +665,7 @@ struct acpi_ibft_target {
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049B, October 2015
+ * Document number: ARM DEN 0049C, May 2017
*
******************************************************************************/
@@ -779,6 +780,8 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_V2 0x00000001 /* Generic SMMUv2 */
#define ACPI_IORT_SMMU_CORELINK_MMU400 0x00000002 /* ARM Corelink MMU-400 */
#define ACPI_IORT_SMMU_CORELINK_MMU500 0x00000003 /* ARM Corelink MMU-500 */
+#define ACPI_IORT_SMMU_CORELINK_MMU401 0x00000004 /* ARM Corelink MMU-401 */
+#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x00000005 /* Cavium thunder_x SMMUv2 */
/* Masks for Flags field above */
@@ -799,17 +802,27 @@ struct acpi_iort_smmu_v3 {
u32 flags;
u32 reserved;
u64 vatos_address;
- u32 model; /* O: generic SMMUv3 */
+ u32 model;
u32 event_gsiv;
u32 pri_gsiv;
u32 gerr_gsiv;
u32 sync_gsiv;
+ u8 pxm;
+ u8 reserved1;
+ u16 reserved2;
};
+/* Values for Model field above */
+
+#define ACPI_IORT_SMMU_V3_GENERIC 0x00000000 /* Generic SMMUv3 */
+#define ACPI_IORT_SMMU_V3_HISILICON_HI161X 0x00000001 /* hi_silicon Hi161x SMMUv3 */
+#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x00000002 /* Cavium CN99xx SMMUv3 */
+
/* Masks for Flags field above */
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1)
+#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3)
/*******************************************************************************
*
@@ -1122,6 +1135,19 @@ struct acpi_mtmr_entry {
/*******************************************************************************
*
+ * SDEI - Software Delegated Exception Interface Descriptor Table
+ *
+ * Conforms to "Software Delegated Exception Interface (SDEI)" ARM DEN0054A,
+ * May 8th, 2017. Copyright 2017 ARM Ltd.
+ *
+ ******************************************************************************/
+
+struct acpi_table_sdei {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* SLIC - Software Licensing Description Table
*
* Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2fcbaec8b368..4f077edb9b81 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -166,6 +166,7 @@ typedef u64 acpi_physical_address;
#define ACPI_SIZE_MAX ACPI_UINT64_MAX
#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */
+#define ACPI_USE_NATIVE_MATH64 /* Has native 64-bit integer support */
/*
* In the case of the Itanium Processor Family (IPF), the hardware does not
@@ -554,6 +555,13 @@ typedef u64 acpi_integer;
#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+/*
+ * Algorithm to obtain access bit width.
+ * Can be used with access_width of struct acpi_generic_address and access_size of
+ * struct acpi_resource_generic_register.
+ */
+#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
+
/*******************************************************************************
*
* Miscellaneous constants
@@ -775,7 +783,7 @@ typedef u32 acpi_event_status;
* | | | | +-- Type of dispatch:to method, handler, notify, or none
* | | | +----- Interrupt type: edge or level triggered
* | | +------- Is a Wake GPE
- * | +--------- Is GPE masked by the software GPE masking mechanism
+ * | +--------- Has been enabled automatically at init time
* +------------ <Reserved>
*/
#define ACPI_GPE_DISPATCH_NONE (u8) 0x00
@@ -791,6 +799,7 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
#define ACPI_GPE_CAN_WAKE (u8) 0x10
+#define ACPI_GPE_AUTO_ENABLED (u8) 0x20
/*
* Flags for GPE and Lock interfaces
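The new ACPI_ACCESS_BIT_WIDTH() macro maps the generic-address access-size encoding (the same values as the ACPI_ACCESS_SIZE_* constants removed from acrestyp.h in this series) to a width in bits, worked out:

	ACPI_ACCESS_BIT_WIDTH(1) == 1 << (1 + 2) ==  8  /* byte  */
	ACPI_ACCESS_BIT_WIDTH(2) == 1 << (2 + 2) == 16  /* word  */
	ACPI_ACCESS_BIT_WIDTH(3) == 1 << (3 + 2) == 32  /* dword */
	ACPI_ACCESS_BIT_WIDTH(4) == 1 << (4 + 2) == 64  /* qword */
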
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 76284bb560a6..c46694abea28 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -16,7 +16,13 @@
#ifdef __KERNEL__
-extern bool hest_disable;
+enum hest_status {
+ HEST_ENABLED,
+ HEST_DISABLED,
+ HEST_NOT_FOUND,
+};
+
+extern int hest_disable;
extern int erst_disable;
#ifdef CONFIG_ACPI_APEI_GHES
extern bool ghes_disable;
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 9f26e01186ae..9061c5c743b3 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -113,6 +113,11 @@ static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
return (void *)(gdata) + acpi_hest_get_record_size(gdata);
}
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)(estatus + 1) < estatus->data_length; \
+ section = acpi_hest_get_next(section))
+
int ghes_notify_sea(void);
#endif /* GHES_H */
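A hedged usage sketch of the iterator added above ("estatus" is assumed to point at a populated struct acpi_hest_generic_status):

	struct acpi_hest_generic_data *gdata;

	apei_estatus_for_each_section(estatus, gdata) {
		/* visit each generic data section; the macro advances via
		 * acpi_hest_get_next(), i.e. by acpi_hest_get_record_size(gdata) */
	}
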
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 912563c66948..043fd559de6e 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -288,6 +288,11 @@
#define ACPI_INLINE
#endif
+/* Use ordered initialization if compiler doesn't support designated. */
+#ifndef ACPI_STRUCT_INIT
+#define ACPI_STRUCT_INIT(field, value) value
+#endif
+
/*
* Configurable calling conventions:
*
@@ -382,8 +387,4 @@
#define ACPI_INIT_FUNCTION
#endif
-#ifndef ACPI_STRUCT_INIT
-#define ACPI_STRUCT_INIT(field, value) value
-#endif
-
#endif /* __ACENV_H__ */
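What ACPI_STRUCT_INIT() selects: in-kernel Linux builds define it as a designated initializer (see the aclinux.h hunk below), while this acenv.h fallback keeps plain positional initialization for compilers without designated-initializer support. A made-up table for illustration:

	struct acpi_example_ops ops = {
		ACPI_STRUCT_INIT(handler, my_handler),	/* hypothetical struct and field */
	};
	/* expands to ".handler = my_handler," under aclinux.h,
	 * or to just "my_handler," with the fallback above. */
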
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 97a7e21cfbe0..9c8f8b79644e 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -84,4 +84,8 @@ typedef __builtin_va_list va_list;
#define COMPILER_VA_MACRO 1
+/* GCC supports native multiply/shift on 32-bit platforms */
+
+#define ACPI_USE_NATIVE_MATH64
+
#endif /* __ACGCC_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 047f13865608..1b473efd9eb6 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -128,6 +128,7 @@
/* Host-dependent types and defines for in-kernel ACPICA */
#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+#define ACPI_USE_NATIVE_MATH64
#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
#define strtoul simple_strtoul
@@ -178,6 +179,9 @@
#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): "
#define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): "
+/*
+ * Linux wants to use designated initializers for function pointer structs.
+ */
#define ACPI_STRUCT_INIT(field, value) .field = value
#else /* !__KERNEL__ */
@@ -213,6 +217,7 @@
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
#define ACPI_USE_NATIVE_DIVIDE
+#define ACPI_USE_NATIVE_MATH64
#endif
#ifndef __cdecl
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index dad68bf46c77..8d28eb010d0d 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -21,6 +21,8 @@ typedef struct {
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
+#define atomic64_set_release(v, i) atomic64_set((v), (i))
+
#define ATOMIC64_OP(op) \
extern void atomic64_##op(long long a, atomic64_t *v);
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 734ad4db388c..2edef8d7fa6b 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -13,6 +13,8 @@ extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap_ro(resource_size_t phys_addr,
unsigned long size);
+extern void *early_memremap_prot(resource_size_t phys_addr,
+ unsigned long size, unsigned long prot_val);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c9d804..f0d8b1c51343 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
*/
/**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -25,18 +25,11 @@
* <0 - On error
*/
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
preempt_disable();
pagefault_disable();
@@ -74,17 +67,9 @@ out_pagefault_enable:
pagefault_enable();
preempt_enable();
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (ret == 0)
+ *oval = oldval;
+
return ret;
}
@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#else
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
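With this change the encoded-op decoding and the FUTEX_OP_CMP_* comparison move out of the per-arch helper into common code. A sketch of the caller-side pattern, reconstructed from the lines removed above (the wrapper name and placement are illustrative):

	static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
	{
		int op = (encoded_op >> 28) & 7;
		int cmp = (encoded_op >> 24) & 15;
		int oparg = (encoded_op << 8) >> 20;	/* sign-extended, as before */
		int cmparg = (encoded_op << 20) >> 20;
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval < cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval > cmparg;
		default: return -ENOSYS;
		}
	}
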
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 7ef015eb3403..b4531e3b2120 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -915,6 +915,9 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
@@ -954,6 +957,14 @@ static inline void *bus_to_virt(unsigned long address)
#ifndef memset_io
#define memset_io memset_io
+/**
+ * memset_io Set a range of I/O memory to a constant value
+ * @addr: The beginning of the I/O-memory range to set
+ * @val: The value to set the memory to
+ * @count: The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
static inline void memset_io(volatile void __iomem *addr, int value,
size_t size)
{
@@ -963,6 +974,14 @@ static inline void memset_io(volatile void __iomem *addr, int value,
#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
+/**
+ * memcpy_fromio Copy a block of data from I/O memory
+ * @dst: The (RAM) destination for the copy
+ * @src: The (I/O memory) source for the data
+ * @count: The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
static inline void memcpy_fromio(void *buffer,
const volatile void __iomem *addr,
size_t size)
@@ -973,6 +992,14 @@ static inline void memcpy_fromio(void *buffer,
#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
+/**
+ * memcpy_toio Copy a block of data into I/O memory
+ * @dst: The (I/O memory) destination for the copy
+ * @src: The (RAM) source for the data
+ * @count: The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
size_t size)
{
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7dfa767dc680..4d7bb98f4134 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -583,6 +583,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
#endif /* CONFIG_MMU */
/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
* is issued. Some architectures may benefit from doing this, and it is
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9f0681bf1e87..66260777d644 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,17 +22,6 @@
#include <asm-generic/qspinlock_types.h>
/**
- * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-#ifndef queued_spin_unlock_wait
-extern void queued_spin_unlock_wait(struct qspinlock *lock);
-#endif
-
-/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
@@ -41,8 +30,6 @@ extern void queued_spin_unlock_wait(struct qspinlock *lock);
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
/*
- * See queued_spin_unlock_wait().
- *
* Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
* isn't immediately observable.
*/
@@ -135,6 +122,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
-#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l)
#endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 532372c6cf15..e5da44eddd2f 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -27,6 +27,8 @@
* __kprobes_text_start, __kprobes_text_end
* __entry_text_start, __entry_text_end
* __ctors_start, __ctors_end
+ * __irqentry_text_start, __irqentry_text_end
+ * __softirqentry_text_start, __softirqentry_text_end
*/
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
@@ -39,6 +41,8 @@ extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __softirqentry_text_start[], __softirqentry_text_end[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 8afa4335e5b2..faddde44de8c 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,10 +112,11 @@ struct mmu_gather {
#define HAVE_GENERIC_MMU_GATHER
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+ struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- unsigned long end);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2828f3..5d2add1a6c96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index da0be9a8d1de..9fdb54a95976 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -60,6 +60,22 @@
#define ALIGN_FUNCTION() . = ALIGN(8)
/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define BSS_MAIN .bss
+#endif
+
+/*
* Align to a 32 byte boundary equal to the
* alignment gcc 4.5 uses for a struct
*/
@@ -198,12 +214,9 @@
/*
* .data section
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
- * .data.identifier which needs to be pulled in with .data, but don't want to
- * pull in .data..stuff which has its own requirements. Same for bss.
*/
#define DATA_DATA \
- *(.data .data.[0-9a-zA-Z_]*) \
+ *(DATA_MAIN) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
@@ -434,16 +447,17 @@
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
-/* .text section. Map to function alignment to avoid address changes
+/*
+ * .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
- * .text.identifier which needs to be pulled in with .text , but some
- * architectures define .text.foo which is not intended to be pulled in here.
- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot .text .text.fixup .text.unlikely) \
+ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -483,25 +497,17 @@
*(.entry.text) \
VMLINUX_SYMBOL(__entry_text_end) = .;
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__irqentry_text_start) = .; \
*(.irqentry.text) \
VMLINUX_SYMBOL(__irqentry_text_end) = .;
-#else
-#define IRQENTRY_TEXT
-#endif
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
#define SOFTIRQENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
*(.softirqentry.text) \
VMLINUX_SYMBOL(__softirqentry_text_end) = .;
-#else
-#define SOFTIRQENTRY_TEXT
-#endif
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
@@ -613,7 +619,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss .bss.[0-9a-zA-Z_]*) \
+ *(BSS_MAIN) \
*(COMMON) \
}
@@ -680,6 +686,31 @@
#define BUG_TABLE
#endif
+#ifdef CONFIG_ORC_UNWINDER
+#define ORC_UNWIND_TABLE \
+ . = ALIGN(4); \
+ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \
+ KEEP(*(.orc_unwind_ip)) \
+ VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \
+ } \
+ . = ALIGN(6); \
+ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_orc_unwind) = .; \
+ KEEP(*(.orc_unwind)) \
+ VMLINUX_SYMBOL(__stop_orc_unwind) = .; \
+ } \
+ . = ALIGN(4); \
+ .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(orc_lookup) = .; \
+ . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ LOOKUP_BLOCK_SIZE) + 1) * 4; \
+ VMLINUX_SYMBOL(orc_lookup_end) = .; \
+ }
+#else
+#define ORC_UNWIND_TABLE
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
@@ -866,7 +897,7 @@
DATA_DATA \
CONSTRUCTORS \
} \
- BUG_TABLE
+ BUG_TABLE \
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 436c4c2683c7..e3cebf640c00 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -192,7 +192,7 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
}
void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
@@ -207,7 +207,26 @@ static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
size -= sizeof(unsigned long);
}
} else {
- __crypto_xor(dst, src, size);
+ __crypto_xor(dst, dst, src, size);
+ }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+
+ while (size > 0) {
+ *d++ = *s1++ ^ *s2++;
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, src1, src2, size);
}
}
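In short, crypto_xor_cpy() is the three-operand counterpart of the in-place crypto_xor(); an illustrative comparison (buffer names are placeholders):

	crypto_xor(dst, src, len);             /* dst[i] ^= src[i]            */
	crypto_xor_cpy(dst, src1, src2, len);  /* dst[i]  = src1[i] ^ src2[i] */
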
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index e2b9c6fe2714..75ec9c662268 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -20,6 +20,9 @@
#include <linux/types.h>
#include <net/sock.h>
+#include <crypto/aead.h>
+#include <crypto/skcipher.h>
+
#define ALG_MAX_PAGES 16
struct crypto_async_request;
@@ -68,6 +71,99 @@ struct af_alg_sgl {
unsigned int npages;
};
+/* TX SGL entry */
+struct af_alg_tsgl {
+ struct list_head list;
+ unsigned int cur; /* Last processed SG entry */
+ struct scatterlist sg[0]; /* Array of SGs forming the SGL */
+};
+
+#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
+ sizeof(struct scatterlist) - 1)
+
+/* RX SGL entry */
+struct af_alg_rsgl {
+ struct af_alg_sgl sgl;
+ struct list_head list;
+ size_t sg_num_bytes; /* Bytes of data in that SGL */
+};
+
+/**
+ * struct af_alg_async_req - definition of crypto request
+ * @iocb: IOCB for AIO operations
+ * @sk: Socket the request is associated with
+ * @first_rsgl: First RX SG
+ * @last_rsgl: Pointer to last RX SG
+ * @rsgl_list: Track RX SGs
+ * @tsgl: Private, per request TX SGL of buffers to process
+ * @tsgl_entries: Number of entries in priv. TX SGL
+ * @outlen: Number of output bytes generated by crypto op
+ * @areqlen: Length of this data structure
+ * @cra_u: Cipher request
+ */
+struct af_alg_async_req {
+ struct kiocb *iocb;
+ struct sock *sk;
+
+ struct af_alg_rsgl first_rsgl;
+ struct af_alg_rsgl *last_rsgl;
+ struct list_head rsgl_list;
+
+ struct scatterlist *tsgl;
+ unsigned int tsgl_entries;
+
+ unsigned int outlen;
+ unsigned int areqlen;
+
+ union {
+ struct aead_request aead_req;
+ struct skcipher_request skcipher_req;
+ } cra_u;
+
+ /* req ctx trails this struct */
+};
+
+/**
+ * struct af_alg_ctx - definition of the crypto context
+ *
+ * The crypto context tracks the input data during the lifetime of an AF_ALG
+ * socket.
+ *
+ * @tsgl_list: Link to TX SGL
+ * @iv: IV for cipher operation
+ * @aead_assoclen: Length of AAD for AEAD cipher operations
+ * @completion: Work queue for synchronous operation
+ * @used: TX bytes sent to kernel. This variable is used to
+ * ensure that user space cannot cause the kernel
+ * to allocate too much memory in sendmsg operation.
+ * @rcvused: Total RX bytes to be filled by kernel. This variable
+ * is used to ensure user space cannot cause the kernel
+ * to allocate too much memory in a recvmsg operation.
+ * @more: More data to be expected from user space?
+ * @merge: Shall new data from user space be merged into existing
+ * SG?
+ * @enc: Cryptographic operation to be performed when
+ * recvmsg is invoked.
+ * @len: Length of memory allocated for this data structure.
+ */
+struct af_alg_ctx {
+ struct list_head tsgl_list;
+
+ void *iv;
+ size_t aead_assoclen;
+
+ struct af_alg_completion completion;
+
+ size_t used;
+ size_t rcvused;
+
+ bool more;
+ bool merge;
+ bool enc;
+
+ unsigned int len;
+};
+
int af_alg_register_type(const struct af_alg_type *type);
int af_alg_unregister_type(const struct af_alg_type *type);
@@ -94,4 +190,78 @@ static inline void af_alg_init_completion(struct af_alg_completion *completion)
init_completion(&completion->completion);
}
+/**
+ * Size of available buffer for sending data from user space to kernel.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_sndbuf(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+
+ return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->used, 0);
+}
+
+/**
+ * Can the send buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_writable(struct sock *sk)
+{
+ return PAGE_SIZE <= af_alg_sndbuf(sk);
+}
+
+/**
+ * Size of available buffer used by kernel for the RX user space operation.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_rcvbuf(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+
+ return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->rcvused, 0);
+}
+
+/**
+ * Can the RX buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_readable(struct sock *sk)
+{
+ return PAGE_SIZE <= af_alg_rcvbuf(sk);
+}
+
+int af_alg_alloc_tsgl(struct sock *sk);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+ size_t dst_offset);
+void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
+int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
+void af_alg_wmem_wakeup(struct sock *sk);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+void af_alg_data_wakeup(struct sock *sk);
+int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ unsigned int ivsize);
+ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags);
+void af_alg_async_cb(struct crypto_async_request *_req, int err);
+unsigned int af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ unsigned int areqlen);
+int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
+ struct af_alg_async_req *areq, size_t maxsize,
+ size_t *outlen);
+
#endif /* _CRYPTO_IF_ALG_H */
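A worked example of the send-buffer accounting helpers above, assuming 4 KiB pages, sk->sk_sndbuf == 212992 and ctx->used == 4096 (values chosen only for illustration):

	/* af_alg_sndbuf(sk)   = max(212992 & PAGE_MASK, PAGE_SIZE) - 4096 = 208896
	 * af_alg_writable(sk) = (PAGE_SIZE <= 208896)                     = true   */
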
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index 479a0078f0f7..805686ba2be4 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -38,6 +38,12 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req)
return req->__ctx;
}
+static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+}
+
static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
{
return tfm->base.__crt_ctx;
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f6d9af3efa45..f0b44c16e88f 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -76,6 +76,8 @@ static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
int crypto_register_ahash(struct ahash_alg *alg);
int crypto_unregister_ahash(struct ahash_alg *alg);
+int crypto_register_ahashes(struct ahash_alg *algs, int count);
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
void ahash_free_instance(struct crypto_instance *inst);
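
Assuming a driver that exposes several ahash algorithms, the new bulk helpers are expected to be used roughly like this; example_ahash_algs is a driver-defined array.

#include <linux/module.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>

static struct ahash_alg example_ahash_algs[2];	/* populated elsewhere */

static int __init example_hash_mod_init(void)
{
	/* Registers the whole array, unwinding automatically on failure. */
	return crypto_register_ahashes(example_ahash_algs,
				       ARRAY_SIZE(example_ahash_algs));
}

static void __exit example_hash_mod_exit(void)
{
	crypto_unregister_ahashes(example_ahash_algs,
				  ARRAY_SIZE(example_ahash_algs));
}

module_init(example_hash_mod_init);
module_exit(example_hash_mod_exit);
MODULE_LICENSE("GPL");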
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 2133d17b7156..1bde0a6514fa 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -145,6 +145,16 @@ static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
return __crypto_kpp_tfm(req->base.tfm);
}
+static inline u32 crypto_kpp_get_flags(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_get_flags(crypto_kpp_tfm(tfm));
+}
+
+static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_kpp_tfm(tfm), flags);
+}
+
/**
* crypto_free_kpp() - free KPP tfm handle
*
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
new file mode 100644
index 000000000000..9b30fec302c8
--- /dev/null
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef __DW_MIPI_DSI__
+#define __DW_MIPI_DSI__
+
+struct dw_mipi_dsi_phy_ops {
+ int (*init)(void *priv_data);
+ int (*get_lane_mbps)(void *priv_data, struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps);
+};
+
+struct dw_mipi_dsi_plat_data {
+ void __iomem *base;
+ unsigned int max_data_lanes;
+
+ enum drm_mode_status (*mode_valid)(void *priv_data,
+ const struct drm_display_mode *mode);
+
+ const struct dw_mipi_dsi_phy_ops *phy_ops;
+
+ void *priv_data;
+};
+
+int dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data);
+void dw_mipi_dsi_remove(struct platform_device *pdev);
+int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data *plat_data);
+void dw_mipi_dsi_unbind(struct device *dev);
+
+#endif /* __DW_MIPI_DSI__ */
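
A hedged sketch of a vendor glue driver wiring into this bridge; every example_* symbol and the 500 Mbps figure are assumptions, only the structures and entry points come from the header above.

#include <linux/platform_device.h>
#include <drm/drm_modes.h>
#include <drm/bridge/dw_mipi_dsi.h>

static int example_dsi_phy_init(void *priv_data)
{
	/* Program the vendor-specific D-PHY here. */
	return 0;
}

static int example_dsi_get_lane_mbps(void *priv_data,
				     struct drm_display_mode *mode,
				     unsigned long mode_flags, u32 lanes,
				     u32 format, unsigned int *lane_mbps)
{
	*lane_mbps = 500;	/* placeholder rate derived from @mode */
	return 0;
}

static const struct dw_mipi_dsi_phy_ops example_dsi_phy_ops = {
	.init = example_dsi_phy_init,
	.get_lane_mbps = example_dsi_get_lane_mbps,
};

static struct dw_mipi_dsi_plat_data example_dsi_plat_data = {
	.max_data_lanes = 4,
	.phy_ops = &example_dsi_phy_ops,
};

static int example_dsi_probe(struct platform_device *pdev)
{
	/* .base and .priv_data would be filled from ioremap()/driver data. */
	return dw_mipi_dsi_probe(pdev, &example_dsi_plat_data);
}

static int example_dsi_remove(struct platform_device *pdev)
{
	dw_mipi_dsi_remove(pdev);
	return 0;
}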
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 39df16af7a4a..7277783a4ff0 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -82,19 +82,10 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>
#include <drm/drm_irq.h>
-
+#include <drm/drm_device.h>
struct module;
-struct drm_device;
-struct drm_agp_head;
-struct drm_local_map;
-struct drm_device_dma;
-struct drm_gem_object;
-struct drm_master;
-struct drm_vblank_crtc;
-struct drm_vma_offset_manager;
-
struct device_node;
struct videomode;
struct reservation_object;
@@ -306,143 +297,6 @@ struct pci_controller;
/**
- * DRM device structure. This structure represent a complete card that
- * may contain multiple heads.
- */
-struct drm_device {
- struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
- int if_version; /**< Highest interface version set */
-
- /** \name Lifetime Management */
- /*@{ */
- struct kref ref; /**< Object ref-count */
- struct device *dev; /**< Device structure of bus-device */
- struct drm_driver *driver; /**< DRM driver managing the device */
- void *dev_private; /**< DRM driver private data */
- struct drm_minor *control; /**< Control node */
- struct drm_minor *primary; /**< Primary node */
- struct drm_minor *render; /**< Render node */
- bool registered;
-
- /* currently active master for this device. Protected by master_mutex */
- struct drm_master *master;
-
- atomic_t unplugged; /**< Flag whether dev is dead */
- struct inode *anon_inode; /**< inode for private address-space */
- char *unique; /**< unique name of the device */
- /*@} */
-
- /** \name Locks */
- /*@{ */
- struct mutex struct_mutex; /**< For others */
- struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
- /*@} */
-
- /** \name Usage Counters */
- /*@{ */
- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
- spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
- int buf_use; /**< Buffers in use -- cannot alloc */
- atomic_t buf_alloc; /**< Buffer allocation in progress */
- /*@} */
-
- struct mutex filelist_mutex;
- struct list_head filelist;
-
- /** \name Memory management */
- /*@{ */
- struct list_head maplist; /**< Linked list of regions */
- struct drm_open_hash map_hash; /**< User token hash table for maps */
-
- /** \name Context handle management */
- /*@{ */
- struct list_head ctxlist; /**< Linked list of context handles */
- struct mutex ctxlist_mutex; /**< For ctxlist */
-
- struct idr ctx_idr;
-
- struct list_head vmalist; /**< List of vmas (for debugging) */
-
- /*@} */
-
- /** \name DMA support */
- /*@{ */
- struct drm_device_dma *dma; /**< Optional pointer for DMA support */
- /*@} */
-
- /** \name Context support */
- /*@{ */
-
- __volatile__ long context_flag; /**< Context swapping flag */
- int last_context; /**< Last current context */
- /*@} */
-
- /**
- * @irq_enabled:
- *
- * Indicates that interrupt handling is enabled, specifically vblank
- * handling. Drivers which don't use drm_irq_install() need to set this
- * to true manually.
- */
- bool irq_enabled;
- int irq;
-
- /*
- * If true, vblank interrupt will be disabled immediately when the
- * refcount drops to zero, as opposed to via the vblank disable
- * timer.
- * This can be set to true it the hardware has a working vblank
- * counter and the driver uses drm_vblank_on() and drm_vblank_off()
- * appropriately.
- */
- bool vblank_disable_immediate;
-
- /* array of size num_crtcs */
- struct drm_vblank_crtc *vblank;
-
- spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
- spinlock_t vbl_lock;
-
- u32 max_vblank_count; /**< size of vblank counter register */
-
- /**
- * List of events
- */
- struct list_head vblank_event_list;
- spinlock_t event_lock;
-
- /*@} */
-
- struct drm_agp_head *agp; /**< AGP data */
-
- struct pci_dev *pdev; /**< PCI device structure */
-#ifdef __alpha__
- struct pci_controller *hose;
-#endif
-
- struct drm_sg_mem *sg; /**< Scatter gather memory */
- unsigned int num_crtcs; /**< Number of CRTCs on this device */
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
-
- struct drm_mode_config mode_config; /**< Current mode config */
-
- /** \name GEM information */
- /*@{ */
- struct mutex object_name_lock;
- struct idr object_name_idr;
- struct drm_vma_offset_manager *vma_offset_manager;
- /*@} */
- int switch_power_state;
-};
-
-/**
* drm_drv_uses_atomic_modeset - check if the driver implements
* atomic_commit()
* @dev: DRM device
@@ -466,19 +320,6 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
return ((dev->driver->driver_features & feature) ? 1 : 0);
}
-static inline void drm_device_set_unplugged(struct drm_device *dev)
-{
- smp_wmb();
- atomic_set(&dev->unplugged, 1);
-}
-
-static inline int drm_device_is_unplugged(struct drm_device *dev)
-{
- int ret = atomic_read(&dev->unplugged);
- smp_rmb();
- return ret;
-}
-
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 0196f264a418..8a5808eb5628 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -154,6 +154,9 @@ struct __drm_connnectors_state {
struct drm_connector_state *state, *old_state, *new_state;
};
+struct drm_private_obj;
+struct drm_private_state;
+
/**
* struct drm_private_state_funcs - atomic state functions for private objects
*
@@ -166,7 +169,7 @@ struct __drm_connnectors_state {
*/
struct drm_private_state_funcs {
/**
- * @duplicate_state:
+ * @atomic_duplicate_state:
*
* Duplicate the current state of the private object and return it. It
* is an error to call this before obj->state has been initialized.
@@ -176,29 +179,30 @@ struct drm_private_state_funcs {
* Duplicated atomic state or NULL when obj->state is not
* initialized or allocation failed.
*/
- void *(*duplicate_state)(struct drm_atomic_state *state, void *obj);
+ struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj);
/**
- * @swap_state:
+ * @atomic_destroy_state:
*
- * This function swaps the existing state of a private object @obj with
- * it's newly created state, the pointer to which is passed as
- * @obj_state_ptr.
+ * Frees the private object state created with @atomic_duplicate_state.
*/
- void (*swap_state)(void *obj, void **obj_state_ptr);
+ void (*atomic_destroy_state)(struct drm_private_obj *obj,
+ struct drm_private_state *state);
+};
- /**
- * @destroy_state:
- *
- * Frees the private object state created with @duplicate_state.
- */
- void (*destroy_state)(void *obj_state);
+struct drm_private_obj {
+ struct drm_private_state *state;
+
+ const struct drm_private_state_funcs *funcs;
+};
+
+struct drm_private_state {
+ struct drm_atomic_state *state;
};
struct __drm_private_objs_state {
- void *obj;
- void *obj_state;
- const struct drm_private_state_funcs *funcs;
+ struct drm_private_obj *ptr;
+ struct drm_private_state *state, *old_state, *new_state;
};
/**
@@ -207,6 +211,7 @@ struct __drm_private_objs_state {
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
* @num_connector: size of the @connectors and @connector_states arrays
@@ -221,6 +226,7 @@ struct drm_atomic_state {
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
+ bool async_update : 1;
struct __drm_planes_state *planes;
struct __drm_crtcs_state *crtcs;
int num_connector;
@@ -309,20 +315,18 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
-int drm_atomic_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
-int drm_atomic_connector_set_property(struct drm_connector *connector,
- struct drm_connector_state *state, struct drm_property *property,
- uint64_t val);
-void * __must_check
+void drm_atomic_private_obj_init(struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs);
+void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
+
+struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
- void *obj,
- const struct drm_private_state_funcs *funcs);
+ struct drm_private_obj *obj);
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
@@ -541,8 +545,6 @@ int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
-
void
drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
@@ -809,43 +811,63 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
for_each_if (plane)
/**
- * __for_each_private_obj - iterate over all private objects
+ * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
* @__state: &struct drm_atomic_state pointer
- * @obj: private object iteration cursor
- * @obj_state: private object state iteration cursor
+ * @obj: &struct drm_private_obj iteration cursor
+ * @old_obj_state: &struct drm_private_state iteration cursor for the old state
+ * @new_obj_state: &struct drm_private_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
- * @__funcs: &struct drm_private_state_funcs iteration cursor
*
- * This macro iterates over the array containing private object data in atomic
- * state
+ * This iterates over all private objects in an atomic update, tracking both
+ * old and new state. This is useful in places where the state delta needs
+ * to be considered, for example in atomic check functions.
*/
-#define __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \
- for ((__i) = 0; \
- (__i) < (__state)->num_private_objs && \
- ((obj) = (__state)->private_objs[__i].obj, \
- (__funcs) = (__state)->private_objs[__i].funcs, \
- (obj_state) = (__state)->private_objs[__i].obj_state, \
- 1); \
- (__i)++) \
+#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (old_obj_state) = (__state)->private_objs[__i].old_state, \
+ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
/**
- * for_each_private_obj - iterate over a specify type of private object
+ * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update
* @__state: &struct drm_atomic_state pointer
- * @obj_funcs: &struct drm_private_state_funcs function table to filter
- * private objects
- * @obj: private object iteration cursor
- * @obj_state: private object state iteration cursor
+ * @obj: &struct drm_private_obj iteration cursor
+ * @old_obj_state: &struct drm_private_state iteration cursor for the old state
* @__i: int iteration cursor, for macro-internal use
- * @__funcs: &struct drm_private_state_funcs iteration cursor
*
- * This macro iterates over the private objects state array while filtering the
- * objects based on the vfunc table that is passed as @obj_funcs. New macros
- * can be created by passing in the vfunc table associated with a specific
- * private object.
+ * This iterates over all private objects in an atomic update, tracking only
+ * the old state. This is useful in disable functions, where we need the old
+ * state the hardware is still in.
+ */
+#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
+
+/**
+ * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @obj: &struct drm_private_obj iteration cursor
+ * @new_obj_state: &struct drm_private_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all private objects in an atomic update, tracking only
+ * the new state. This is useful in enable functions, where we need the new state the
+ * hardware should be in when the atomic commit operation has completed.
*/
-#define for_each_private_obj(__state, obj_funcs, obj, obj_state, __i, __funcs) \
- __for_each_private_obj(__state, obj, obj_state, __i, __funcs) \
- for_each_if (__funcs == obj_funcs)
+#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
/**
* drm_atomic_crtc_needs_modeset - compute combined modeset need
@@ -853,7 +875,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*
* To give drivers flexibility &struct drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
- * planes_changed, mode_changed and active_changed. This helper simply
+ * mode_changed, active_changed and connectors_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
*
* The atomic helper code sets these booleans, but drivers can and should
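
To make the reworked private-object interface concrete, a hedged sketch of a driver-side usage: a wrapper state, the get call in atomic_check, and the old/new iterator. All example_* names are hypothetical.

#include <linux/err.h>
#include <drm/drm_atomic.h>

struct example_priv_state {
	struct drm_private_state base;
	unsigned int bandwidth;	/* driver-specific derived value */
};

#define to_example_priv_state(x) \
	container_of(x, struct example_priv_state, base)

static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state,
				struct drm_private_obj *example_obj)
{
	struct drm_private_state *obj_state, *old_s, *new_s;
	struct drm_private_obj *obj;
	int i;

	/* Pull our object into the atomic state (duplicates it if needed). */
	obj_state = drm_atomic_get_private_obj_state(state, example_obj);
	if (IS_ERR(obj_state))
		return PTR_ERR(obj_state);

	to_example_priv_state(obj_state)->bandwidth = 0;

	/* Walk every private object tracked by this update. */
	for_each_oldnew_private_obj_in_state(state, obj, old_s, new_s, i) {
		/* compare old_s and new_s as needed */
	}

	return 0;
}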
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index f0a8678ae98e..d2b56cc657e9 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -33,6 +33,8 @@
#include <drm/drm_modeset_helper.h>
struct drm_atomic_state;
+struct drm_private_obj;
+struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
@@ -41,9 +43,14 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
+int drm_atomic_helper_async_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_async_commit(struct drm_device *dev,
+ struct drm_atomic_state *state);
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -52,6 +59,9 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
+void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state);
@@ -77,8 +87,8 @@ void
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
bool atomic);
-void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
- bool stall);
+int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+ bool stall);
/* nonblocking commit helpers */
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
@@ -114,15 +124,6 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state);
-int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val);
-int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val);
-int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -135,8 +136,6 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode);
struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector);
@@ -178,6 +177,8 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 1dc94d5392e2..6522d4cbc9d9 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -268,6 +268,9 @@ void drm_bridge_enable(struct drm_bridge *bridge);
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
+struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type);
#endif
#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ae5b7dc316c8..ea8da401c93c 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -135,6 +135,28 @@ struct drm_scdc {
struct drm_hdmi_info {
/** @scdc: sink's scdc support and capabilities */
struct drm_scdc scdc;
+
+ /**
+ * @y420_vdb_modes: bitmap of modes which can support ycbcr420
+ * output only (not normal RGB/YCBCR444/422 outputs). There are a total
+ * of 107 VICs defined by the CEA-861-F spec, so the size is 128 bits to
+ * map up to 128 VICs.
+ */
+ unsigned long y420_vdb_modes[BITS_TO_LONGS(128)];
+
+ /**
+ * @y420_cmdb_modes: bitmap of modes which can support ycbcr420
+ * output as well, along with normal HDMI outputs. There are a total of
+ * 107 VICs defined by the CEA-861-F spec, so the size is 128 bits to
+ * map up to 128 VICs.
+ */
+ unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)];
+
+ /** @y420_cmdb_map: bitmap of SVD indices, used to extract the CMDB modes */
+ u64 y420_cmdb_map;
+
+ /** @y420_dc_modes: bitmap of supported YCBCR 420 deep color modes */
+ u8 y420_dc_modes;
};
/**
@@ -198,6 +220,7 @@ struct drm_display_info {
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
/**
* @color_formats: HDMI Color formats, selects between RGB and YCrCb
@@ -359,8 +382,8 @@ struct drm_connector_funcs {
* implement the 4 level DPMS support on the connector any more, but
* instead only have an on/off "ACTIVE" property on the CRTC object.
*
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_connector_dpms() to implement this hook.
+ * This hook is not used by atomic drivers, remapping of the legacy DPMS
+ * property is entirely handled in the DRM core.
*
* RETURNS:
*
@@ -457,11 +480,9 @@ struct drm_connector_funcs {
* This is the legacy entry point to update a property attached to the
* connector.
*
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_connector_set_property() to implement this hook.
- *
* This callback is optional if the driver does not support any legacy
- * driver-private properties.
+ * driver-private properties. For atomic drivers it is not used because
+ * property handling is done entirely in the DRM core.
*
* RETURNS:
*
@@ -726,6 +747,15 @@ struct drm_connector {
bool interlace_allowed;
bool doublescan_allowed;
bool stereo_allowed;
+
+ /**
+ * @ycbcr_420_allowed: This bool indicates if this connector is
+ * capable of handling YCBCR 420 output. While parsing the EDID
+ * blocks it is very helpful to know whether the source is capable of
+ * handling YCBCR 420 outputs.
+ */
+ bool ycbcr_420_allowed;
+
/**
* @registered: Is this connector exposed (registered) with userspace?
* Protected by @mutex.
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 629a5fe075b3..1a642020e306 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -358,14 +358,6 @@ struct drm_crtc_funcs {
* drm_crtc_enable_color_mgmt(), which then supports the legacy gamma
* interface through the drm_atomic_helper_legacy_gamma_set()
* compatibility implementation.
- *
- * NOTE:
- *
- * Drivers that support gamma tables and also fbdev emulation through
- * the provided helper library need to take care to fill out the gamma
- * hooks for both. Currently there's a bit an unfortunate duplication
- * going on, which should eventually be unified to just one set of
- * hooks.
*/
int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -481,11 +473,9 @@ struct drm_crtc_funcs {
* This is the legacy entry point to update a property attached to the
* CRTC.
*
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_crtc_set_property() to implement this hook.
- *
* This callback is optional if the driver does not support any legacy
- * driver-private properties.
+ * driver-private properties. For atomic drivers it is not used because
+ * property handling is done entirely in the DRM core.
*
* RETURNS:
*
@@ -685,6 +675,9 @@ struct drm_crtc_funcs {
* drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
* enabling a CRTC.
*
+ * See also &drm_device.vblank_disable_immediate and
+ * &drm_device.max_vblank_count.
+ *
* Returns:
*
* Raw vblank counter value.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
new file mode 100644
index 000000000000..e21af87a2f3c
--- /dev/null
+++ b/include/drm/drm_device.h
@@ -0,0 +1,190 @@
+#ifndef _DRM_DEVICE_H_
+#define _DRM_DEVICE_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+
+#include <drm/drm_hashtab.h>
+#include <drm/drm_mode_config.h>
+
+struct drm_driver;
+struct drm_minor;
+struct drm_master;
+struct drm_device_dma;
+struct drm_vblank_crtc;
+struct drm_sg_mem;
+struct drm_local_map;
+struct drm_vma_offset_manager;
+
+struct inode;
+
+struct pci_dev;
+struct pci_controller;
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+struct drm_device {
+ struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
+ int if_version; /**< Highest interface version set */
+
+ /** \name Lifetime Management */
+ /*@{ */
+ struct kref ref; /**< Object ref-count */
+ struct device *dev; /**< Device structure of bus-device */
+ struct drm_driver *driver; /**< DRM driver managing the device */
+ void *dev_private; /**< DRM driver private data */
+ struct drm_minor *control; /**< Control node */
+ struct drm_minor *primary; /**< Primary node */
+ struct drm_minor *render; /**< Render node */
+ bool registered;
+
+ /* currently active master for this device. Protected by master_mutex */
+ struct drm_master *master;
+
+ atomic_t unplugged; /**< Flag whether dev is dead */
+ struct inode *anon_inode; /**< inode for private address-space */
+ char *unique; /**< unique name of the device */
+ /*@} */
+
+ /** \name Locks */
+ /*@{ */
+ struct mutex struct_mutex; /**< For others */
+ struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+ int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
+ spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+
+ struct mutex filelist_mutex;
+ struct list_head filelist;
+
+ /** \name Memory management */
+ /*@{ */
+ struct list_head maplist; /**< Linked list of regions */
+ struct drm_open_hash map_hash; /**< User token hash table for maps */
+
+ /** \name Context handle management */
+ /*@{ */
+ struct list_head ctxlist; /**< Linked list of context handles */
+ struct mutex ctxlist_mutex; /**< For ctxlist */
+
+ struct idr ctx_idr;
+
+ struct list_head vmalist; /**< List of vmas (for debugging) */
+
+ /*@} */
+
+ /** \name DMA support */
+ /*@{ */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
+ /*@} */
+
+ /** \name Context support */
+ /*@{ */
+
+ __volatile__ long context_flag; /**< Context swapping flag */
+ int last_context; /**< Last current context */
+ /*@} */
+
+ /**
+ * @irq_enabled:
+ *
+ * Indicates that interrupt handling is enabled, specifically vblank
+ * handling. Drivers which don't use drm_irq_install() need to set this
+ * to true manually.
+ */
+ bool irq_enabled;
+ int irq;
+
+ /**
+ * @vblank_disable_immediate:
+ *
+ * If true, vblank interrupt will be disabled immediately when the
+ * refcount drops to zero, as opposed to via the vblank disable
+ * timer.
+ *
+ * This can be set to true if the hardware has a working vblank counter
+ * with high-precision timestamping (otherwise there are races) and the
+ * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
+ * appropriately. See also @max_vblank_count and
+ * &drm_crtc_funcs.get_vblank_counter.
+ */
+ bool vblank_disable_immediate;
+
+ /**
+ * @vblank:
+ *
+ * Array of vblank tracking structures, one per &struct drm_crtc. For
+ * historical reasons (vblank support predates kernel modesetting) this
+ * is free-standing and not part of &struct drm_crtc itself. It must be
+ * initialized explicitly by calling drm_vblank_init().
+ */
+ struct drm_vblank_crtc *vblank;
+
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ spinlock_t vbl_lock;
+
+ /**
+ * @max_vblank_count:
+ *
+ * Maximum value of the vblank registers. This value +1 will result in a
+ * wrap-around of the vblank register. It is used by the vblank core to
+ * handle wrap-arounds.
+ *
+ * If set to zero the vblank core will try to guess the elapsed vblanks
+ * between times when the vblank interrupt is disabled through
+ * high-precision timestamps. That approach suffers from small
+ * races and imprecision over longer time periods, hence exposing a
+ * hardware vblank counter is always recommended.
+ *
+ * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
+ */
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
+ /*@} */
+
+ struct drm_agp_head *agp; /**< AGP data */
+
+ struct pci_dev *pdev; /**< PCI device structure */
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
+
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
+
+ struct {
+ int context;
+ struct drm_hw_lock *lock;
+ } sigdata;
+
+ struct drm_local_map *agp_buffer_map;
+ unsigned int agp_buffer_token;
+
+ struct drm_mode_config mode_config; /**< Current mode config */
+
+ /** \name GEM information */
+ /*@{ */
+ struct mutex object_name_lock;
+ struct idr object_name_idr;
+ struct drm_vma_offset_manager *vma_offset_manager;
+ /*@} */
+ int switch_power_state;
+};
+
+#endif
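
As a usage note, an illustrative driver-load fragment configuring the vblank fields documented above; the 24-bit counter width is an assumed hardware property, and example_vblank_setup is a made-up helper.

#include <drm/drm_device.h>
#include <drm/drm_vblank.h>

static int example_vblank_setup(struct drm_device *dev, unsigned int num_crtcs)
{
	int ret;

	ret = drm_vblank_init(dev, num_crtcs);
	if (ret)
		return ret;

	/* Hardware has a 24-bit frame counter and precise vblank timestamps. */
	dev->max_vblank_count = 0xffffff;
	dev->vblank_disable_immediate = true;

	return 0;
}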
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 177ab6f86855..d55abb75f29a 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -404,12 +404,17 @@ struct drm_dp_payload {
int vcpi;
};
+#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+
struct drm_dp_mst_topology_state {
+ struct drm_private_state base;
int avail_slots;
struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
+#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
+
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
*
@@ -419,6 +424,11 @@ struct drm_dp_mst_topology_state {
*/
struct drm_dp_mst_topology_mgr {
/**
+ * @base: Base private object for atomic
+ */
+ struct drm_private_obj base;
+
+ /**
* @dev: device pointer for adding i2c devices etc.
*/
struct drm_device *dev;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index d855f9ae41a8..71bbaaec836d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -30,7 +30,8 @@
#include <linux/list.h>
#include <linux/irqreturn.h>
-struct drm_device;
+#include <drm/drm_device.h>
+
struct drm_file;
struct drm_gem_object;
struct drm_master;
@@ -173,8 +174,6 @@ struct drm_driver {
*/
void (*release) (struct drm_device *);
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
-
/**
* @get_vblank_counter:
*
@@ -392,6 +391,11 @@ struct drm_driver {
*/
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
+ /**
+ * @debugfs_init:
+ *
+ * Allows drivers to create driver-specific debugfs files.
+ */
int (*debugfs_init)(struct drm_minor *minor);
/**
@@ -410,7 +414,18 @@ struct drm_driver {
*/
void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+ /**
+ * @gem_open_object:
+ *
+ * Driver hook called upon gem handle creation
+ */
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+
+ /**
+ * @gem_close_object:
+ *
+ * Driver hook called upon gem handle release
+ */
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
/**
@@ -423,19 +438,34 @@ struct drm_driver {
size_t size);
/* prime: */
- /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ /**
+ * @prime_handle_to_fd:
+ *
+ * export handle -> fd (see drm_gem_prime_handle_to_fd() helper)
+ */
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
- /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ /**
+ * @prime_fd_to_handle:
+ *
+ * import fd -> handle (see drm_gem_prime_fd_to_handle() helper)
+ */
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
- /* export GEM -> dmabuf */
+ /**
+ * @gem_prime_export:
+ *
+ * export GEM -> dmabuf
+ */
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
- /* import dmabuf -> GEM */
+ /**
+ * @gem_prime_import:
+ *
+ * import dmabuf -> GEM
+ */
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
- /* low-level interface used by drm_gem_prime_{import,export} */
int (*gem_prime_pin)(struct drm_gem_object *obj);
void (*gem_prime_unpin)(struct drm_gem_object *obj);
struct reservation_object * (*gem_prime_res_obj)(
@@ -507,19 +537,46 @@ struct drm_driver {
struct drm_device *dev,
uint32_t handle);
- /* Driver private ops for this object */
+ /**
+ * @gem_vm_ops: Driver private ops for this object
+ */
const struct vm_operations_struct *gem_vm_ops;
+ /** @major: driver major number */
int major;
+ /** @minor: driver minor number */
int minor;
+ /** @patchlevel: driver patch level */
int patchlevel;
+ /** @name: driver name */
char *name;
+ /** @desc: driver description */
char *desc;
+ /** @date: driver date */
char *date;
+ /** @driver_features: driver features */
u32 driver_features;
+
+ /**
+ * @ioctls:
+ *
+ * Array of driver-private IOCTL description entries. See the chapter on
+ * :ref:`IOCTL support in the userland interfaces
+ * chapter<drm_driver_ioctl>` for the full details.
+ */
+
const struct drm_ioctl_desc *ioctls;
+ /** @num_ioctls: Number of entries in @ioctls. */
int num_ioctls;
+
+ /**
+ * @fops:
+ *
+ * File operations for the DRM device node. See the discussion in
+ * :ref:`file operations<drm_driver_fops>` for in-depth coverage and
+ * some examples.
+ */
const struct file_operations *fops;
/* Everything below here is for legacy driver, never use! */
@@ -557,7 +614,24 @@ void drm_dev_unregister(struct drm_device *dev);
void drm_dev_ref(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
-void drm_unplug_dev(struct drm_device *dev);
+void drm_dev_unplug(struct drm_device *dev);
+
+/**
+ * drm_dev_is_unplugged - is a DRM device unplugged
+ * @dev: DRM device
+ *
+ * This function can be called to check whether a hotpluggable device has
+ * been unplugged. Unplugging itself is signalled through drm_dev_unplug().
+ * If a device is unplugged, these two functions guarantee that any store
+ * before calling drm_dev_unplug() is visible to callers of this function
+ * after it completes.
+ */
+static inline int drm_dev_is_unplugged(struct drm_device *dev)
+{
+ int ret = atomic_read(&dev->unplugged);
+ smp_rmb();
+ return ret;
+}
+
int drm_dev_set_unique(struct drm_device *dev, const char *name);
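
The typical caller of drm_dev_is_unplugged() looks roughly like the following ioctl guard; example_ioctl is a made-up handler illustrating the pattern described in the kernel-doc.

#include <linux/errno.h>
#include <drm/drm_drv.h>

static int example_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* The device is still present; safe to touch hardware below. */
	return 0;
}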
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7b9f48b62e07..1e1908a6b1d6 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -213,6 +213,14 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+/* YCBCR 420 deep color modes */
+#define DRM_EDID_YCBCR420_DC_48 (1 << 6)
+#define DRM_EDID_YCBCR420_DC_36 (1 << 5)
+#define DRM_EDID_YCBCR420_DC_30 (1 << 4)
+#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \
+ DRM_EDID_YCBCR420_DC_36 | \
+ DRM_EDID_YCBCR420_DC_30)
+
/* ELD Header Block */
#define DRM_ELD_HEADER_BLOCK_SIZE 4
@@ -343,7 +351,8 @@ drm_load_edid_firmware(struct drm_connector *connector)
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ bool is_hdmi2_sink);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 199a63f48659..a323781afc3f 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -24,9 +24,9 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state);
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- int state);
+ bool state);
void drm_fb_cma_destroy(struct drm_framebuffer *fb);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 119e5e4609c7..33fe95927742 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -85,38 +85,6 @@ struct drm_fb_helper_surface_size {
*/
struct drm_fb_helper_funcs {
/**
- * @gamma_set:
- *
- * Set the given gamma LUT register on the given CRTC.
- *
- * This callback is optional.
- *
- * FIXME:
- *
- * This callback is functionally redundant with the core gamma table
- * support and simply exists because the fbdev hasn't yet been
- * refactored to use the core gamma table interfaces.
- */
- void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
- /**
- * @gamma_get:
- *
- * Read the given gamma LUT register on the given CRTC, used to save the
- * current LUT when force-restoring the fbdev for e.g. kdbg.
- *
- * This callback is optional.
- *
- * FIXME:
- *
- * This callback is functionally redundant with the core gamma table
- * support and simply exists because the fbdev hasn't yet been
- * refactored to use the core gamma table interfaces.
- */
- void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-
- /**
* @fb_probe:
*
* Driver callback to allocate and initialize the fbdev info structure.
@@ -169,7 +137,6 @@ struct drm_fb_helper_connector {
* @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
* @connector_count: number of connected connectors
* @connector_info_alloc_count: size of connector_info
- * @connector_info: array of per-connector information
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
@@ -191,6 +158,12 @@ struct drm_fb_helper {
struct drm_fb_helper_crtc *crtc_info;
int connector_count;
int connector_info_alloc_count;
+ /**
+ * @connector_info:
+ *
+ * Array of per-connector information. Do not iterate directly, but use
+ * drm_fb_helper_for_each_connector.
+ */
struct drm_fb_helper_connector **connector_info;
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
@@ -201,6 +174,18 @@ struct drm_fb_helper {
struct work_struct resume_work;
/**
+ * @lock:
+ *
+ * Top-level FBDEV helper lock. This protects all internal data
+ * structures and lists, such as @connector_info and @crtc_info.
+ *
+ * FIXME: fbdev emulation locking is a mess and long term we want to
+ * protect all helper internal state with this lock as well as reduce
+ * core KMS locking as much as possible.
+ */
+ struct mutex lock;
+
+ /**
* @kernel_fb_list:
*
* Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
@@ -215,6 +200,29 @@ struct drm_fb_helper {
* needs to be reprobed when fbdev is in control again.
*/
bool delayed_hotplug;
+
+ /**
+ * @deferred_setup:
+ *
+ * If no outputs are connected (disconnected or unknown) the FB helper
+ * code will defer setup until at least one of the outputs shows up.
+ * This field keeps track of the status so that setup can be retried
+ * at every hotplug event until it succeeds eventually.
+ *
+ * Protected by @lock.
+ */
+ bool deferred_setup;
+
+ /**
+ * @preferred_bpp:
+ *
+ * Temporary storage for the driver's preferred BPP setting passed to
+ * FB helper initialization. This needs to be tracked so that deferred
+ * FB helper setup can pass this on.
+ *
+ * See also: @deferred_setup
+ */
+ int preferred_bpp;
};
/**
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 5244f059d23a..b6996ddb19d6 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -190,6 +190,13 @@ struct drm_framebuffer {
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
+ /**
+ * @obj: GEM objects backing the framebuffer, one per plane (optional).
+ *
+ * This is used by the GEM framebuffer helpers, see e.g.
+ * drm_gem_fb_create().
+ */
+ struct drm_gem_object *obj[4];
};
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 663d80358057..9c55c2acaa2b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -131,21 +131,6 @@ struct drm_gem_object {
uint32_t write_domain;
/**
- * @pending_read_domains:
- *
- * While validating an exec operation, the
- * new read/write domain values are computed here.
- * They will be transferred to the above values
- * at the point that any cache flushing occurs
- */
- uint32_t pending_read_domains;
-
- /**
- * @pending_write_domain: Write domain similar to @pending_read_domains.
- */
- uint32_t pending_write_domain;
-
- /**
* @dma_buf:
*
* dma-buf associated with this GEM object.
@@ -317,6 +302,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
+int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset);
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index b42529e0fae0..58a739bf15f1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -73,11 +73,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args);
-/* map memory region for DRM framebuffer to user space */
-int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, u32 handle,
- u64 *offset);
-
/* set vm_flags and we can change the VM attribute to other one at here */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
new file mode 100644
index 000000000000..db9cfa07235e
--- /dev/null
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -0,0 +1,37 @@
+#ifndef __DRM_GEM_FB_HELPER_H__
+#define __DRM_GEM_FB_HELPER_H__
+
+struct drm_device;
+struct drm_file;
+struct drm_fb_helper_surface_size;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+struct drm_gem_object;
+struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
+
+struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+void drm_gem_fb_destroy(struct drm_framebuffer *fb);
+int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int *handle);
+
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs);
+struct drm_framebuffer *
+drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+int drm_gem_fb_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+struct drm_framebuffer *
+drm_gem_fbdev_fb_create(struct drm_device *dev,
+ struct drm_fb_helper_surface_size *sizes,
+ unsigned int pitch_align, struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs);
+
+#endif
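
A minimal sketch, assuming a plain GEM-backed driver, of how these helpers are meant to be plugged into the mode-config and plane-helper vtables; the example_* identifiers are placeholders.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,	/* wraps the driver's GEM BOs */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	/* Picks up the implicit fence from the backing dma-buf, if any. */
	.prepare_fb	= drm_gem_fb_prepare_fb,
	/* .atomic_check / .atomic_update remain driver-specific. */
};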
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 42981711189b..1b37368416c8 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -757,6 +757,12 @@ struct drm_mode_config {
*/
bool allow_fb_modifiers;
+ /**
+ * @modifiers_property: Plane property listing the supported
+ * modifier/format combinations.
+ */
+ struct drm_property *modifiers_property;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 94ac771fe460..9f3421c8efcd 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -80,6 +80,7 @@ struct videomode;
* @MODE_ONE_SIZE: only one resolution is supported
* @MODE_NO_REDUCED: monitor doesn't accept reduced blanking
* @MODE_NO_STEREO: stereo modes not supported
+ * @MODE_NO_420: ycbcr 420 modes not supported
* @MODE_STALE: mode has become stale
* @MODE_BAD: unspecified reason
* @MODE_ERROR: error condition
@@ -124,6 +125,7 @@ enum drm_mode_status {
MODE_ONE_SIZE,
MODE_NO_REDUCED,
MODE_NO_STEREO,
+ MODE_NO_420,
MODE_STALE = -3,
MODE_BAD = -2,
MODE_ERROR = -1
@@ -450,6 +452,12 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+bool drm_mode_is_420_only(const struct drm_display_info *display,
+ const struct drm_display_mode *mode);
+bool drm_mode_is_420_also(const struct drm_display_info *display,
+ const struct drm_display_mode *mode);
+bool drm_mode_is_420(const struct drm_display_info *display,
+ const struct drm_display_mode *mode);
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
@@ -496,6 +504,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY);
+enum drm_mode_status
+drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
+ struct drm_connector *connector);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
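
A possible connector ->mode_valid fragment built on the new predicates, rejecting YCBCR420-only modes when the source cannot drive them; purely illustrative, with example_connector_mode_valid as a hypothetical hook.

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static enum drm_mode_status
example_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	const struct drm_display_info *info = &connector->display_info;

	/* The sink accepts this mode only as YCBCR 420, but we can't send it. */
	if (drm_mode_is_420_only(info, mode) && !connector->ycbcr_420_allowed)
		return MODE_NO_420;

	return MODE_OK;
}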
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 85984b208218..c55cf3ff6847 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -71,7 +71,7 @@ struct drm_crtc_helper_funcs {
* This callback is used by the legacy CRTC helpers. Atomic helpers
* also support using this hook for enabling and disabling a CRTC to
* facilitate transitions to atomic, but it is deprecated. Instead
- * @enable and @disable should be used.
+ * @atomic_enable and @atomic_disable should be used.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
@@ -85,8 +85,8 @@ struct drm_crtc_helper_funcs {
*
* This callback is used by the legacy CRTC helpers. Atomic helpers
* also support using this hook for disabling a CRTC to facilitate
- * transitions to atomic, but it is deprecated. Instead @disable should
- * be used.
+ * transitions to atomic, but it is deprecated. Instead @atomic_disable
+ * should be used.
*/
void (*prepare)(struct drm_crtc *crtc);
@@ -100,8 +100,8 @@ struct drm_crtc_helper_funcs {
*
* This callback is used by the legacy CRTC helpers. Atomic helpers
* also support using this hook for enabling a CRTC to facilitate
- * transitions to atomic, but it is deprecated. Instead @enable should
- * be used.
+ * transitions to atomic, but it is deprecated. Instead @atomic_enable
+ * should be used.
*/
void (*commit)(struct drm_crtc *crtc);
@@ -222,7 +222,7 @@ struct drm_crtc_helper_funcs {
* pipeline is suspended using either DPMS or the new "ACTIVE" property.
* Which means register values set in this callback might get reset when
* the CRTC is suspended, but not restored. Such drivers should instead
- * move all their CRTC setup into the @enable callback.
+ * move all their CRTC setup into the @atomic_enable callback.
*
* This callback is optional.
*/
@@ -267,22 +267,6 @@ struct drm_crtc_helper_funcs {
enum mode_set_atomic);
/**
- * @load_lut:
- *
- * Load a LUT prepared with the &drm_fb_helper_funcs.gamma_set vfunc.
- *
- * This callback is optional and is only used by the fbdev emulation
- * helpers.
- *
- * FIXME:
- *
- * This callback is functionally redundant with the core gamma table
- * support and simply exists because the fbdev hasn't yet been
- * refactored to use the core gamma table interfaces.
- */
- void (*load_lut)(struct drm_crtc *crtc);
-
- /**
* @disable:
*
* This callback should be used to disable the CRTC. With the atomic
@@ -297,7 +281,7 @@ struct drm_crtc_helper_funcs {
* Atomic drivers don't need to implement it if there's no need to
* disable anything at the CRTC level. To ensure that runtime PM
* handling (using either DPMS or the new "ACTIVE" property) works
- * @disable must be the inverse of @enable for atomic drivers.
+ * @disable must be the inverse of @atomic_enable for atomic drivers.
* Atomic drivers should consider to use @atomic_disable instead of
* this one.
*
@@ -316,24 +300,6 @@ struct drm_crtc_helper_funcs {
void (*disable)(struct drm_crtc *crtc);
/**
- * @enable:
- *
- * This callback should be used to enable the CRTC. With the atomic
- * drivers it is called before all encoders connected to this CRTC are
- * enabled through the encoder's own &drm_encoder_helper_funcs.enable
- * hook. If that sequence is too simple drivers can just add their own
- * hooks and call it from this CRTC callback here by looping over all
- * encoders connected to it using for_each_encoder_on_crtc().
- *
- * This hook is used only by atomic helpers, for symmetry with @disable.
- * Atomic drivers don't need to implement it if there's no need to
- * enable anything at the CRTC level. To ensure that runtime PM handling
- * (using either DPMS or the new "ACTIVE" property) works
- * @enable must be the inverse of @disable for atomic drivers.
- */
- void (*enable)(struct drm_crtc *crtc);
-
- /**
* @atomic_check:
*
* Drivers should check plane-update related CRTC constraints in this
@@ -433,6 +399,30 @@ struct drm_crtc_helper_funcs {
struct drm_crtc_state *old_crtc_state);
/**
+ * @atomic_enable:
+ *
+ * This callback should be used to enable the CRTC. With the atomic
+ * drivers it is called before all encoders connected to this CRTC are
+ * enabled through the encoder's own &drm_encoder_helper_funcs.enable
+ * hook. If that sequence is too simple drivers can just add their own
+ * hooks and call it from this CRTC callback here by looping over all
+ * encoders connected to it using for_each_encoder_on_crtc().
+ *
+ * This hook is used only by atomic helpers, for symmetry with
+ * @atomic_disable. Atomic drivers don't need to implement it if there's
+ * no need to enable anything at the CRTC level. To ensure that runtime
+ * PM handling (using either DPMS or the new "ACTIVE" property) works
+ * @atomic_enable must be the inverse of @atomic_disable for atomic
+ * drivers.
+ *
+ * Drivers can use the @old_crtc_state input parameter if the operations
+ * needed to enable the CRTC don't depend solely on the new state but
+ * also on the transition between the old state and the new state.
+ */
+ void (*atomic_enable)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
+
+ /**
* @atomic_disable:
*
* This callback should be used to disable the CRTC. With the atomic
@@ -1129,6 +1119,56 @@ struct drm_plane_helper_funcs {
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_plane_state *old_state);
+
+ /**
+ * @atomic_async_check:
+ *
+ * Drivers should set this function pointer to check if the plane state
+ * can be updated in an async fashion. Here async means "not vblank
+ * synchronized".
+ *
+ * This hook is called by drm_atomic_async_check() to establish if a
+ * given update can be committed asynchronously, that is, if it can
+ * jump ahead of the state currently queued for update.
+ *
+ * RETURNS:
+ *
+ * Return 0 on success; any error returned indicates that the update
+ * cannot be applied in an asynchronous manner.
+ */
+ int (*atomic_async_check)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+ /**
+ * @atomic_async_update:
+ *
+ * Drivers should set this function pointer to perform asynchronous
+ * updates of planes, that is, jump ahead of the currently queued
+ * state and update the plane. Here async means "not vblank
+ * synchronized".
+ *
+ * This hook is called by drm_atomic_helper_async_commit().
+ *
+ * An async update will happen on legacy cursor updates. An async
+ * update won't happen if there is an outstanding commit modifying
+ * the same plane.
+ *
+ * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
+ * takes the new &drm_plane_state as parameter. When doing async_update
+ * drivers shouldn't replace the &drm_plane_state but update the
+ * current one with the new plane configurations in the new
+ * plane_state.
+ *
+ * FIXME:
+ * - It only works for single plane updates
+ * - Async Pageflips are not supported yet
+ * - Some hw might still scan out the old buffer until the next
+ * vblank, however we let go of the fb references as soon as
+ * we run this hook. For now drivers must implement their own workers
+ * for deferring if needed, until a common solution is created.
+ */
+ void (*atomic_async_update)(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
};
/**
@@ -1169,7 +1209,8 @@ struct drm_mode_config_helper_funcs {
* After the atomic update is committed to the hardware this hook needs
* to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
- * drm_atomic_helper_wait_for_vblanks(), and then clean up the old
+ * drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done(), and then clean up the old
* framebuffers using drm_atomic_helper_cleanup_planes().
*
* When disabling a CRTC this hook _must_ stall for the commit to
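
A skeleton of the new asynchronous plane hooks; restricting async updates to cursor planes is an assumption of this sketch, not a requirement of the interface, and all example_* names are hypothetical.

#include <linux/errno.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane.h>

static int example_plane_atomic_async_check(struct drm_plane *plane,
					    struct drm_plane_state *state)
{
	/* Only allow async updates for cursor planes that keep a framebuffer. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR || !state->fb)
		return -EINVAL;

	return 0;
}

static void example_plane_atomic_async_update(struct drm_plane *plane,
					      struct drm_plane_state *new_state)
{
	/* Update the live state in place, as the kernel-doc above requires. */
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	/* ... reprogram the hardware cursor position here ... */
}

static const struct drm_plane_helper_funcs example_async_plane_helpers = {
	.atomic_async_check	= example_plane_atomic_async_check,
	.atomic_async_update	= example_plane_atomic_async_update,
};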
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 4579fac1080c..674599025d7d 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -43,13 +43,12 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
#ifdef CONFIG_PCI
int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
-int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
static inline int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
@@ -57,12 +56,6 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
{
return -ENOSYS;
}
-
-static inline int drm_pci_set_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- return -ENOSYS;
-}
#endif
#define DRM_PCIE_SPEED_25 1
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 9ab3e7044812..73f90f9d057f 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -233,11 +233,9 @@ struct drm_plane_funcs {
* This is the legacy entry point to update a property attached to the
* plane.
*
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_plane_set_property() to implement this hook.
- *
* This callback is optional if the driver does not support any legacy
- * driver-private properties.
+ * driver-private properties. For atomic drivers it is not used because
+ * property handling is done entirely in the DRM core.
*
* RETURNS:
*
@@ -392,6 +390,22 @@ struct drm_plane_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_plane_state *state);
+
+ /**
+ * @format_mod_supported:
+ *
+ * This optional hook is used for the DRM to determine if the given
+ * format/modifier combination is valid for the plane. This allows the
+ * DRM to generate the correct format bitmask (which formats apply to
+ * which modifier).
+ *
+ * Returns:
+ *
+ * True if the given modifier is valid for that format on the plane.
+ * False otherwise.
+ */
+ bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format,
+ uint64_t modifier);
};
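A hedged example of the new hook for a hypothetical plane that only supports linear scanout; DRM_FORMAT_MOD_LINEAR is the standard linear-layout modifier from drm_fourcc.h:

static bool mydrv_plane_format_mod_supported(struct drm_plane *plane,
					     uint32_t format,
					     uint64_t modifier)
{
	/* Every supported format works only with the linear layout. */
	return modifier == DRM_FORMAT_MOD_LINEAR;
}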
/**
@@ -487,6 +501,9 @@ struct drm_plane {
unsigned int format_count;
bool format_default;
+ uint64_t *modifiers;
+ unsigned int modifier_count;
+
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
@@ -527,13 +544,14 @@ struct drm_plane {
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
-__printf(8, 9)
+__printf(9, 10)
int drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
unsigned int format_count,
+ const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...);
int drm_plane_init(struct drm_device *dev,
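A sketch of a call site updated for the extra format_modifiers argument (mydrv_* names are placeholders); the modifier list is assumed to be terminated with DRM_FORMAT_MOD_INVALID, matching other users of this interface:

static int mydrv_init_primary_plane(struct drm_device *dev,
				    struct drm_plane *plane)
{
	static const uint32_t formats[] = { DRM_FORMAT_XRGB8888 };
	static const uint64_t modifiers[] = {
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID,		/* assumed terminator */
	};

	return drm_universal_plane_init(dev, plane, 1 << 0, &mydrv_plane_funcs,
					formats, ARRAY_SIZE(formats),
					modifiers, DRM_PLANE_TYPE_PRIMARY,
					"primary");
}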
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 619868dc08d8..37355c623e6c 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -273,6 +273,8 @@ int drm_property_replace_global_blob(struct drm_device *dev,
const void *data,
struct drm_mode_object *obj_holds_id,
struct drm_property *prop_holds_id);
+bool drm_property_replace_blob(struct drm_property_blob **blob,
+ struct drm_property_blob *new_blob);
struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
void drm_property_blob_put(struct drm_property_blob *blob);
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
index c25122bb490a..f92eb2094d6b 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/drm_scdc_helper.h
@@ -131,31 +131,6 @@ static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
-/**
- * drm_scdc_set_scrambling - enable scrambling
- * @adapter: I2C adapter for DDC channel
- * @enable: bool to indicate if scrambling is to be enabled/disabled
- *
- * Writes the TMDS config register over SCDC channel, and:
- * enables scrambling when enable = 1
- * disables scrambling when enable = 0
- *
- * Returns:
- * True if scrambling is set/reset successfully, false otherwise.
- */
bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
-
-/**
- * drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio
- * @adapter: I2C adapter for DDC channel
- * @set: set or reset the high clock ratio
- *
- * Writes to the TMDS config register over SCDC channel, and:
- * sets TMDS clock ratio to 1/40 when set = 1
- * sets TMDS clock ratio to 1/10 when set = 0
- *
- * Returns:
- * True if write is successful, false otherwise.
- */
bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 2d36538e4a17..6d9adbb46293 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -122,6 +122,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
struct drm_connector *connector);
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 89976da542b1..c00fee539822 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -28,6 +28,8 @@
#include "linux/dma-fence.h"
+struct drm_syncobj_cb;
+
/**
* struct drm_syncobj - sync object.
*
@@ -43,15 +45,47 @@ struct drm_syncobj {
/**
* @fence:
* NULL or a pointer to the fence bound to this object.
+ *
+ * This field should not be used directly. Use drm_syncobj_fence_get
+ * and drm_syncobj_replace_fence instead.
*/
struct dma_fence *fence;
/**
+ * @cb_list:
+ * List of callbacks to call when the fence gets replaced
+ */
+ struct list_head cb_list;
+ /**
+ * @lock:
+ * locks cb_list and write-locks fence.
+ */
+ spinlock_t lock;
+ /**
* @file:
* a file backing for this syncobj.
*/
struct file *file;
};
+typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb);
+
+/**
+ * struct drm_syncobj_cb - callback for drm_syncobj_add_callback
+ * @node: used by drm_syncobj_add_callback to append this struct to
+ * syncobj::cb_list
+ * @func: drm_syncobj_func_t to call
+ *
+ * This struct will be initialized by drm_syncobj_add_callback; additional
+ * data can be passed along by embedding drm_syncobj_cb in another struct.
+ * The callback will get called the next time drm_syncobj_replace_fence is
+ * called.
+ */
+struct drm_syncobj_cb {
+ struct list_head node;
+ drm_syncobj_func_t func;
+};
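A sketch of how a caller might embed drm_syncobj_cb to get notified when the fence is replaced; struct my_waiter and its flag are hypothetical driver-side state:

struct my_waiter {
	struct drm_syncobj_cb cb;
	bool fence_replaced;
};

static void my_syncobj_cb(struct drm_syncobj *syncobj,
			  struct drm_syncobj_cb *cb)
{
	struct my_waiter *waiter = container_of(cb, struct my_waiter, cb);

	waiter->fence_replaced = true;	/* e.g. wake up whoever is waiting */
}

static void my_register_waiter(struct drm_syncobj *syncobj,
			       struct my_waiter *waiter)
{
	waiter->fence_replaced = false;
	/* fires on the next drm_syncobj_replace_fence() */
	drm_syncobj_add_callback(syncobj, &waiter->cb, my_syncobj_cb);
}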
+
void drm_syncobj_free(struct kref *kref);
/**
@@ -77,13 +111,30 @@ drm_syncobj_put(struct drm_syncobj *obj)
kref_put(&obj->refcount, drm_syncobj_free);
}
+static inline struct dma_fence *
+drm_syncobj_fence_get(struct drm_syncobj *syncobj)
+{
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&syncobj->fence);
+ rcu_read_unlock();
+
+ return fence;
+}
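Typical use of the RCU-safe getter above, as a minimal sketch: the returned reference must be dropped with dma_fence_put() once the caller is done.

static void my_wait_on_syncobj(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = drm_syncobj_fence_get(syncobj);

	if (fence) {
		dma_fence_wait(fence, true);	/* interruptible wait */
		dma_fence_put(fence);
	}
}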
+
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func);
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb);
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
-int drm_syncobj_fence_get(struct drm_file *file_private,
- u32 handle,
- struct dma_fence **fence);
+int drm_syncobj_find_fence(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 4cde47332dfa..7fba9efe4951 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -168,8 +168,7 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
-void drm_vblank_cleanup(struct drm_device *dev);
-u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index d137b16ee873..83346ddb9dba 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -62,11 +62,7 @@ mipi_dbi_from_tinydrm(struct tinydrm_device *tdev)
}
int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
- struct gpio_desc *dc,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
- const struct drm_display_mode *mode,
- unsigned int rotation);
+ struct gpio_desc *dc);
int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
const struct drm_simple_display_pipe_funcs *pipe_funcs,
struct drm_driver *driver,
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index 9b9b6cfe3ba5..d554ded60ee9 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -43,6 +43,8 @@ void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_clip_rect *clip, bool swap);
+void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip);
struct backlight_device *tinydrm_of_find_backlight(struct device *dev);
int tinydrm_enable_backlight(struct backlight_device *backlight);
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 00b800df4d1b..4774fe3d4273 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -56,9 +56,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.gem_prime_vmap = drm_gem_cma_prime_vmap, \
.gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
.gem_prime_mmap = drm_gem_cma_prime_mmap, \
- .dumb_create = drm_gem_cma_dumb_create, \
- .dumb_map_offset = drm_gem_cma_dumb_map_offset, \
- .dumb_destroy = drm_gem_dumb_destroy
+ .dumb_create = drm_gem_cma_dumb_create
/**
* TINYDRM_MODE - tinydrm display mode
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 990d529f823c..5f821a9b3a1f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -229,13 +229,14 @@ struct ttm_mem_type_manager_func {
* struct ttm_mem_type_manager member debug
*
* @man: Pointer to a memory type manager.
- * @prefix: Prefix to be used in printout to identify the caller.
+ * @printer: Printer used to dump the memory type manager state.
*
* This function is called to print out the state of the memory
* type manager to aid debugging of out-of-memory conditions.
* It may not be called from within atomic context.
*/
- void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+ void (*debug)(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer);
};
/**
@@ -472,6 +473,23 @@ struct ttm_bo_driver {
*/
unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+ * @write: whether to read (0) from or write (non-0) to BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, or -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
};
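A hypothetical caller honouring the partial-copy contract described above (not the actual ptrace call site); it retries until the whole buffer has been copied or the driver reports an error:

static int my_bo_access_all(struct ttm_buffer_object *bo, unsigned long offset,
			    void *buf, int len, int write)
{
	if (!bo->bdev->driver->access_memory)
		return -EIO;

	while (len > 0) {
		int ret = bo->bdev->driver->access_memory(bo, offset, buf,
							  len, write);

		if (ret <= 0)
			return ret ? ret : -EIO;
		offset += ret;
		buf += ret;	/* void * arithmetic, as commonly used in-kernel */
		len -= ret;
	}
	return 0;
}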
/**
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index d33f17c8a515..b8ff8824e21b 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -98,6 +98,8 @@
#define PMIC_GPIO_FUNC_PAIRED "paired"
#define PMIC_GPIO_FUNC_FUNC1 "func1"
#define PMIC_GPIO_FUNC_FUNC2 "func2"
+#define PMIC_GPIO_FUNC_FUNC3 "func3"
+#define PMIC_GPIO_FUNC_FUNC4 "func4"
#define PMIC_GPIO_FUNC_DTEST1 "dtest1"
#define PMIC_GPIO_FUNC_DTEST2 "dtest2"
#define PMIC_GPIO_FUNC_DTEST3 "dtest3"
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index b7aa3646208b..ceb672305f59 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -66,7 +66,8 @@
#define EXYNOS_PIN_FUNC_4 4
#define EXYNOS_PIN_FUNC_5 5
#define EXYNOS_PIN_FUNC_6 6
-#define EXYNOS_PIN_FUNC_F 0xf
+#define EXYNOS_PIN_FUNC_EINT 0xf
+#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT
/* Drive strengths for Exynos7 FSYS1 block */
#define EXYNOS7_FSYS1_PIN_DRV_LV1 0
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 5de0673f333b..8cf829dbf20e 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -127,4 +127,27 @@ struct rxrpc_key_data_v1 {
#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */
#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */
+/*
+ * Truncate a time64_t to the range from 1970 to 2106 as in the network
+ * protocol.
+ */
+static inline u32 rxrpc_time64_to_u32(time64_t time)
+{
+ if (time < 0)
+ return 0;
+
+ if (time > UINT_MAX)
+ return UINT_MAX;
+
+ return (u32)time;
+}
+
+/*
+ * Extend u32 back to time64_t using the same 1970-2106 range.
+ */
+static inline time64_t rxrpc_u32_to_time64(u32 time)
+{
+ return (time64_t)time;
+}
+
#endif /* _KEYS_RXRPC_TYPE_H */
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index f6e030617467..f87fe20fcb05 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -48,7 +48,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -86,7 +85,6 @@ static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c749eef1daa1..502af53ec012 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -57,9 +57,6 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
-
-extern const struct fwnode_operations acpi_fwnode_ops;
-
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
struct fwnode_handle *fwnode;
@@ -68,15 +65,14 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->type = FWNODE_ACPI_STATIC;
- fwnode->ops = &acpi_fwnode_ops;
+ fwnode->ops = &acpi_static_fwnode_ops;
return fwnode;
}
static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
{
- if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
+ if (WARN_ON(!is_acpi_static_node(fwnode)))
return;
kfree(fwnode);
@@ -228,8 +224,8 @@ struct acpi_subtable_proc {
int count;
};
-char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
-void __acpi_unmap_table(char *map, unsigned long size);
+void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
+void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
void acpi_boot_table_init (void);
@@ -427,6 +423,8 @@ void acpi_dev_free_resource_list(struct list_head *list);
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data);
+int acpi_dev_get_dma_resources(struct acpi_device *adev,
+ struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -556,6 +554,25 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
+enum acpi_predicate {
+ all_versions,
+ less_than_or_equal,
+ equal,
+ greater_than_or_equal,
+};
+
+/* Table must be terminated by a NULL entry */
+struct acpi_platform_list {
+ char oem_id[ACPI_OEM_ID_SIZE+1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1];
+ u32 oem_revision;
+ char *table;
+ enum acpi_predicate pred;
+ char *reason;
+ u32 data;
+};
+int acpi_match_platform_list(const struct acpi_platform_list *plat);
+
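A hypothetical quirk table for acpi_match_platform_list(); the empty entry terminates the list, and the return value is assumed to be the index of the matching entry or a negative errno when nothing matches:

static const struct acpi_platform_list my_quirks[] __initconst = {
	{ "VENDOR", "TABLE-ID", 0, ACPI_SIG_FADT, less_than_or_equal,
	  "known-broken firmware revision" },
	{ }	/* terminator */
};

static int __init my_apply_quirks(void)
{
	int idx = acpi_match_platform_list(my_quirks);

	if (idx >= 0)
		pr_info("applying quirk: %s\n", my_quirks[idx].reason);
	return 0;
}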
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
@@ -774,6 +791,12 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
+static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
+ u64 *offset, u64 *size)
+{
+ return -ENODEV;
+}
+
static inline int acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr)
{
@@ -1007,13 +1030,14 @@ struct acpi_reference_args {
};
#ifdef CONFIG_ACPI
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
struct acpi_reference_args *args);
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+static inline int acpi_node_get_property_reference(
+ const struct fwnode_handle *fwnode,
const char *name, size_t index,
struct acpi_reference_args *args)
{
@@ -1021,22 +1045,25 @@ static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
MAX_ACPI_REFERENCE_ARGS, args);
}
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
+int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val);
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+int acpi_dev_prop_read_single(struct acpi_device *adev,
+ const char *propname, enum dev_prop_type proptype,
+ void *val);
+int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval);
+int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+struct fwnode_handle *
+acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev);
+int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle **remote,
struct fwnode_handle **port,
struct fwnode_handle **endpoint);
@@ -1104,35 +1131,36 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int
-__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
struct acpi_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
+static inline int
+acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
+static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
const char *propname,
void **valptr)
{
return -ENXIO;
}
-static inline int acpi_dev_prop_get(struct acpi_device *adev,
+static inline int acpi_dev_prop_get(const struct acpi_device *adev,
const char *propname,
void **valptr)
{
return -ENXIO;
}
-static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
+static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
const char *propname,
enum dev_prop_type proptype,
void *val)
@@ -1140,7 +1168,7 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
return -ENXIO;
}
-static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
+static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -1148,7 +1176,7 @@ static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_read(struct acpi_device *adev,
+static inline int acpi_dev_prop_read(const struct acpi_device *adev,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -1157,26 +1185,27 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
}
static inline struct fwnode_handle *
-acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return NULL;
}
static inline struct fwnode_handle *
-acpi_node_get_parent(struct fwnode_handle *fwnode)
+acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
return NULL;
}
static inline struct fwnode_handle *
-acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return ERR_PTR(-ENXIO);
}
static inline int
-acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle **remote,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
@@ -1209,6 +1238,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
#endif
#ifdef CONFIG_ACPI_SPCR_TABLE
+extern bool qdf2400_e44_present;
int parse_spcr(bool earlycon);
#else
static inline int parse_spcr(bool earlycon) { return 0; }
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8379d406ad2e..8d3f0bf80379 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -36,7 +36,7 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
-void iort_set_dma_mask(struct device *dev);
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else
static inline void acpi_iort_init(void) { }
@@ -47,7 +47,8 @@ static inline struct irq_domain *iort_get_device_domain(struct device *dev,
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
-static inline void iort_set_dma_mask(struct device *dev) { }
+static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
+ u64 *size) { }
static inline
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 9af3c174c03a..716ce587247e 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -4,10 +4,12 @@
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
+#include <linux/types.h>
+
void topology_normalize_cpu_scale(void);
struct device_node;
-int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
struct sched_domain;
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e65ae4b2ed48..c7a353825450 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -60,7 +60,8 @@ enum {
ATA_ID_FW_REV = 23,
ATA_ID_PROD = 27,
ATA_ID_MAX_MULTSECT = 47,
- ATA_ID_DWORD_IO = 48,
+ ATA_ID_DWORD_IO = 48, /* before ATA-8 */
+ ATA_ID_TRUSTED = 48, /* ATA-8 and later */
ATA_ID_CAPABILITY = 49,
ATA_ID_OLD_PIO_MODES = 51,
ATA_ID_OLD_DMA_MODES = 52,
@@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
return id[ATA_ID_DWORD_IO] & (1 << 0);
}
+static inline bool ata_id_has_trusted(const u16 *id)
+{
+ if (ata_id_major_version(id) <= 7)
+ return false;
+ return id[ATA_ID_TRUSTED] & (1 << 0);
+}
+
static inline bool ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index c56be7410130..40d6bfec0e0d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -38,6 +38,9 @@
* Besides, if an arch has a special barrier for acquire/release, it could
* implement its own __atomic_op_* and use the same framework for building
* variants
+ *
+ * If an architecture overrides __atomic_op_acquire() it will probably want
+ * to define smp_mb__after_spinlock().
*/
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index c893b9520a67..2b038442c352 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -133,6 +133,8 @@ enum virtchnl_ops {
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
};
/* This macro is used to generate a compilation error if a structure
@@ -223,7 +225,7 @@ struct virtchnl_vsi_resource {
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
-/* VF offload flags
+/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
@@ -251,7 +253,7 @@ struct virtchnl_vf_resource {
u16 max_vectors;
u16 max_mtu;
- u32 vf_offload_flags;
+ u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
@@ -686,6 +688,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 05488da3aee9..3ae9013eeaaa 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -46,7 +46,7 @@ struct linux_binprm {
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
-};
+} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -81,7 +81,7 @@ struct linux_binfmt {
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
-};
+} __randomize_layout;
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b1cf4ba0902..1f0720de8990 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -38,7 +38,15 @@
#define BIO_BUG_ON
#endif
+#ifdef CONFIG_THP_SWAP
+#if HPAGE_PMD_NR > 256
+#define BIO_MAX_PAGES HPAGE_PMD_NR
+#else
#define BIO_MAX_PAGES 256
+#endif
+#else
+#define BIO_MAX_PAGES 256
+#endif
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8449c1..b97be27e5a85 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -29,6 +29,8 @@ static inline u32 __bitrev32(u32 x)
#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+#define __bitrev8x4(x) (__bitrev32(swab32(x)))
+
#define __constant_bitrev32(x) \
({ \
u32 __x = x; \
@@ -50,6 +52,15 @@ static inline u32 __bitrev32(u32 x)
__x; \
})
+#define __constant_bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
#define __constant_bitrev8(x) \
({ \
u8 __x = x; \
@@ -75,6 +86,14 @@ static inline u32 __bitrev32(u32 x)
__bitrev16(__x); \
})
+#define bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8x4(__x) : \
+ __bitrev8x4(__x); \
+ })
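bitrev8x4() reverses the bits within each byte while preserving byte order, so, as a worked example inside some function:

	u32 v = bitrev8x4(0x12345678);	/* 0x12->0x48, 0x34->0x2c, 0x56->0x6a, 0x78->0x1e => v == 0x482c6a1e */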
+
#define bitrev8(x) \
({ \
u8 __x = x; \
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
new file mode 100644
index 000000000000..b4ade198007d
--- /dev/null
+++ b/include/linux/blk-mq-rdma.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BLK_MQ_RDMA_H
+#define _LINUX_BLK_MQ_RDMA_H
+
+struct blk_mq_tag_set;
+struct ib_device;
+
+int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+ struct ib_device *dev, int first_vec);
+
+#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..4b99b13c7e68 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -134,7 +134,7 @@ typedef __u32 __bitwise req_flags_t;
struct request {
struct list_head queuelist;
union {
- struct call_single_data csd;
+ call_single_data_t csd;
u64 fifo_time;
};
@@ -568,7 +568,6 @@ struct request_queue {
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 360c082e885c..d41d40ac3efd 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,7 +85,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b69e7a5869ff..c2cb1b5c094e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -14,8 +14,10 @@
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
+#include <linux/numa.h>
struct perf_event;
+struct bpf_prog;
struct bpf_map;
/* map is generic key/value storage optionally accesible by eBPF programs */
@@ -48,6 +50,7 @@ struct bpf_map {
u32 map_flags;
u32 pages;
u32 id;
+ int numa_node;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
@@ -117,39 +120,27 @@ enum bpf_access_type {
};
/* types of values stored in eBPF registers */
+/* Pointer types represent:
+ * pointer
+ * pointer + imm
+ * pointer + (u16) var
+ * pointer + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ SCALAR_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
+ PTR_TO_STACK, /* reg == frame_pointer + offset */
+ PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
-
- /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
- * elem value. We only allow this if we can statically verify that
- * access from this register are going to fall within the size of the
- * map element.
- */
- PTR_TO_MAP_VALUE_ADJ,
};
-struct bpf_prog;
-
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
*/
@@ -262,6 +253,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
@@ -272,7 +264,7 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
-void *bpf_map_area_alloc(size_t size);
+void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
@@ -318,6 +310,19 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+
+/* Map specifics */
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
+void __dev_map_flush(struct bpf_map *map);
+
+/* Return map's numa specified by userspace */
+static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
+{
+ return (attr->map_flags & BPF_F_NUMA_NODE) ?
+ attr->numa_node : NUMA_NO_NODE;
+}
+
#else
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -348,6 +353,12 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct bpf_prog *__must_check
+bpf_prog_inc_not_zero(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
return 0;
@@ -356,8 +367,39 @@ static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
+
+static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
+
+static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
+{
+}
+
+static inline void __dev_map_flush(struct bpf_map *map)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
+int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+#else
+static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+ return NULL;
+}
+
+static inline int sock_map_attach_prog(struct bpf_map *map,
+ struct bpf_prog *prog,
+ u32 type)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -374,6 +416,7 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+extern const struct bpf_func_proto bpf_sock_map_update_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 3d137c33d664..6f1a567667b8 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -11,6 +11,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
@@ -35,3 +36,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+#ifdef CONFIG_STREAM_PARSER
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+#endif
+#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 621076f56251..b8d200f60a40 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -9,40 +9,75 @@
#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
+#include <linux/tnum.h>
- /* Just some arbitrary values so we can safely do math without overflowing and
- * are obviously wrong for any sort of memory access.
- */
-#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -1
+/* Maximum variable offset umax_value permitted when resolving memory accesses.
+ * In practice this is far bigger than any realistic pointer offset; this limit
+ * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
+ */
+#define BPF_MAX_VAR_OFF (1ULL << 31)
+/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
+ * that converting umax_value to int cannot overflow.
+ */
+#define BPF_MAX_VAR_SIZ INT_MAX
+
+/* Liveness marks, used for registers and spilled-regs (in stack slots).
+ * Read marks propagate upwards until they find a write mark; they record that
+ * "one of this state's descendants read this reg" (and therefore the reg is
+ * relevant for states_equal() checks).
+ * Write marks collect downwards and do not propagate; they record that "the
+ * straight-line code that reached this state (from its parent) wrote this reg"
+ * (and therefore that reads propagated from this state or its descendants
+ * should not propagate to its parent).
+ * A state with a write mark can receive read marks; it just won't propagate
+ * them to its parent, since the write mark is a property, not of the state,
+ * but of the link between it and its parent. See mark_reg_read() and
+ * mark_stack_slot_read() in kernel/bpf/verifier.c.
+ */
+enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
+ REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
+ REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+};
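An illustrative sketch (not the kernel's mark_reg_read()) of the upward propagation described above: a read keeps walking up the parent chain and stops at the first state whose straight-line code wrote the register.

static void my_mark_reg_read(struct bpf_verifier_state *state, u32 regno)
{
	struct bpf_verifier_state *parent = state->parent;

	while (parent) {
		/* a write in the child screens the read off from the parent */
		if (state->regs[regno].live == REG_LIVE_WRITTEN)
			break;
		if (parent->regs[regno].live == REG_LIVE_NONE)
			parent->regs[regno].live = REG_LIVE_READ;
		state = parent;
		parent = state->parent;
	}
}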
struct bpf_reg_state {
enum bpf_reg_type type;
union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u16 off;
- u16 range;
- };
+ /* valid when type == PTR_TO_PACKET */
+ u16 range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
};
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
+ /* For PTR_TO_PACKET, used to find other pointers with the same variable
+ * offset, so they can share range knowledge.
+ * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
+ * came from, when one is tested for != NULL.
+ */
u32 id;
+ /* Ordering of fields matters. See states_equal() */
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
/* Used to determine if any memory access using this register will
- * result in a bad access. These two fields must be last.
- * See states_equal()
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
*/
- s64 min_value;
- u64 max_value;
- u32 min_align;
- u32 aux_off;
- u32 aux_off_align;
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ /* This field must be last, for states_equal() reasons. */
+ enum bpf_reg_liveness live;
};
enum bpf_stack_slot_type {
@@ -60,6 +95,7 @@ struct bpf_verifier_state {
struct bpf_reg_state regs[MAX_BPF_REG];
u8 stack_slot_type[MAX_BPF_STACK];
struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+ struct bpf_verifier_state *parent;
};
/* linked list of verifier states used to prune search */
@@ -102,7 +138,6 @@ struct bpf_verifier_env {
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
bool seen_direct_write;
- bool varlen_map_value_access;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index e34dde2da0ef..637a20cfb237 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -24,6 +24,7 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
struct request;
struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
};
struct bsg_job {
+ struct scsi_request sreq;
struct device *dev;
struct request *req;
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 3285c944194a..7e9c991c95e0 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -20,12 +20,10 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
-
struct ccp_device;
struct ccp_cmd;
-#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
- defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+#if defined(CONFIG_CRYPTO_DEV_SP_CCP)
/**
* ccp_present - check if a CCP device is present
@@ -71,7 +69,7 @@ unsigned int ccp_version(void);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd);
-#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+#else /* CONFIG_CRYPTO_DEV_CCP_SP_DEV is not enabled */
static inline int ccp_present(void)
{
@@ -88,7 +86,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
return -ENODEV;
}
-#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
/***** AES engine *****/
@@ -231,6 +229,7 @@ enum ccp_xts_aes_unit_size {
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 408bc09ce497..cb28eb21e3ca 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -17,7 +17,7 @@ struct cdev {
struct list_head list;
dev_t dev;
unsigned int count;
-};
+} __randomize_layout;
void cdev_init(struct cdev *, const struct file_operations *);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index f0f6c537b64c..040dd105c3e7 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -10,14 +10,14 @@
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c6d96a5f46fd..adf670ecaf94 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -148,6 +148,7 @@ struct ceph_osd_request_target {
int size;
int min_size;
bool sort_bitwise;
+ bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
bool paused;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index a0996cb9faed..af3444a5bfdd 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -272,6 +272,8 @@ bool ceph_is_new_interval(const struct ceph_osds *old_acting,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
+ bool old_recovery_deletes,
+ bool new_recovery_deletes,
const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 385db08bb8b2..b8281feda9c7 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -158,6 +158,10 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
+#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */
+#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */
+#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */
+#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */
/*
* The error code to return when an OSD can't handle a write
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5a6a109b4a50..3fc433303d7a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -27,7 +27,7 @@
#endif
#ifndef __SC_DELOUSE
-#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
#define COMPAT_SYSCALL_DEFINE0(name) \
@@ -365,10 +365,10 @@ asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -382,6 +382,18 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
unsigned long vlen, loff_t pos);
#endif
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_readv64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cd4bbe8242bd..16d41de92ee3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -166,6 +166,8 @@
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+
+#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
#endif
#if GCC_VERSION >= 40300
@@ -201,17 +203,6 @@
#endif
#endif
-#ifdef CONFIG_STACK_VALIDATION
-#define annotate_unreachable() ({ \
- asm("%c0:\t\n" \
- ".pushsection .discard.unreachable\t\n" \
- ".long %c0b - .\t\n" \
- ".popsection\t\n" : : "i" (__LINE__)); \
-})
-#else
-#define annotate_unreachable()
-#endif
-
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
@@ -235,6 +226,7 @@
#endif /* GCC_VERSION >= 40500 */
#if GCC_VERSION >= 40600
+
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
@@ -242,7 +234,17 @@
* this.
*/
#define __visible __attribute__((externally_visible))
-#endif
+
+/*
+ * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but anonymous
+ * structs are only supported since GCC 4.6. To provide as much build testing coverage
+ * as possible, this is used for all GCC 4.6+ builds, and not just on
+ * RANDSTRUCT_PLUGIN builds.
+ */
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end } __randomize_layout;
+
+#endif /* GCC_VERSION >= 40600 */
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 219f82f3ec1a..e95a2631e545 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -185,8 +185,34 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
/* Unreachable code */
+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_reachable() ({ \
+ asm("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__LINE__)); \
+})
+#define annotate_unreachable() ({ \
+ asm("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__LINE__)); \
+})
+#define ASM_UNREACHABLE \
+ "999:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+#else
+#define annotate_reachable()
+#define annotate_unreachable()
+#endif
+
+#ifndef ASM_UNREACHABLE
+# define ASM_UNREACHABLE
+#endif
#ifndef unreachable
-# define unreachable() do { } while (1)
+# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
/*
@@ -452,6 +478,11 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __no_randomize_layout
#endif
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
@@ -470,6 +501,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define __visible
#endif
+#ifndef __nostackprotector
+# define __nostackprotector
+#endif
+
/*
* Assume alignment of return value.
*/
@@ -512,7 +547,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
-#define __compiletime_assert(condition, msg, prefix, suffix) \
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
@@ -520,6 +556,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 5d5aaae3af43..cae5400022a3 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -9,6 +9,9 @@
*/
#include <linux/wait.h>
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+#include <linux/lockdep.h>
+#endif
/*
* struct completion - structure used to maintain state for a "completion"
@@ -25,13 +28,53 @@
struct completion {
unsigned int done;
wait_queue_head_t wait;
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+ struct lockdep_map_cross map;
+#endif
};
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+static inline void complete_acquire(struct completion *x)
+{
+ lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
+}
+
+static inline void complete_release(struct completion *x)
+{
+ lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
+}
+
+static inline void complete_release_commit(struct completion *x)
+{
+ lock_commit_crosslock((struct lockdep_map *)&x->map);
+}
+
+#define init_completion(x) \
+do { \
+ static struct lock_class_key __key; \
+ lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
+ "(complete)" #x, \
+ &__key, 0); \
+ __init_completion(x); \
+} while (0)
+#else
+#define init_completion(x) __init_completion(x)
+static inline void complete_acquire(struct completion *x) {}
+static inline void complete_release(struct completion *x) {}
+static inline void complete_release_commit(struct completion *x) {}
+#endif
+
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
+ STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
+#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#endif
#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
+ (*({ init_completion(&work); &work; }))
/**
* DECLARE_COMPLETION - declare and initialize a completion structure
@@ -70,7 +113,7 @@ struct completion {
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void init_completion(struct completion *x)
+static inline void __init_completion(struct completion *x)
{
x->done = 0;
init_waitqueue_head(&x->wait);
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 7d410260661b..edfeaba95429 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -24,6 +24,12 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
+
+/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_TS 11
+#define ETM4_CFG_BIT_RETSTK 12
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index f10a9b3761cd..537ff842ff73 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,6 +127,15 @@ struct cpufreq_policy {
*/
unsigned int transition_delay_us;
+ /*
+ * Remote DVFS flag (Not added to the driver structure as we don't want
+ * to access another structure from scheduler hotpath).
+ *
+ * Should be set if CPUs can do DVFS on behalf of other CPUs from
+ * different cpufreq policies.
+ */
+ bool dvfs_possible_from_any_cpu;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
@@ -370,6 +379,12 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+/*
+ * Set by drivers to disallow use of governors with "dynamic_switching" flag
+ * set.
+ */
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)
+
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -487,14 +502,8 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* polling frequency is 1000 times the transition latency of the processor. The
* ondemand governor will work on any processor with transition latency <= 10ms,
* using appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * the ondemand governor will not work. All times here are in us (microseconds).
*/
-#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (20)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
@@ -507,9 +516,8 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- unsigned int max_transition_latency; /* HW must be able to switch to
- next freq faster than this value in nano secs or we
- will fallback to performance governor */
+ /* For governors which change frequency dynamically by themselves */
+ bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
};
@@ -525,6 +533,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq);
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -562,6 +571,17 @@ struct governor_attr {
size_t count);
};
+static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
+{
+ /*
+ * Allow remote callbacks if:
+ * - dvfs_possible_from_any_cpu flag is set
+ * - the local and remote CPUs share cpufreq policy
+ */
+ return policy->dvfs_possible_from_any_cpu ||
+ cpumask_test_cpu(smp_processor_id(), policy->cpus);
+}
+
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
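
A minimal sketch (not part of this patch) of how a governor-side hook might use the new cpufreq_can_do_remote_dvfs() check; the function name and the choice of __cpufreq_driver_target() are illustrative only:

#include <linux/cpufreq.h>

/* Hypothetical governor hook: skip updates that may not run on this CPU. */
static void example_governor_update(struct cpufreq_policy *policy,
				    unsigned int next_freq)
{
	if (!cpufreq_can_do_remote_dvfs(policy))
		return;

	__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
}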
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b56573bf440d..82b30e638430 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -39,8 +39,6 @@ enum cpuhp_state {
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
- CPUHP_SCSI_BNX2FC_DEAD,
- CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index fc1e5d7fc1c7..8f7788d23b57 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -52,17 +52,18 @@ struct cpuidle_state {
int (*enter_dead) (struct cpuidle_device *dev, int index);
/*
- * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+ * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
*/
- void (*enter_freeze) (struct cpuidle_device *dev,
+ void (*enter_s2idle) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
};
/* Idle State Flags */
#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -197,14 +198,14 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
@@ -224,6 +225,12 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+void cpuidle_poll_state_init(struct cpuidle_driver *drv);
+#else
+static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
@@ -250,12 +257,6 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-#define CPUIDLE_DRIVER_STATE_START 1
-#else
-#define CPUIDLE_DRIVER_STATE_START 0
-#endif
-
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
({ \
int __ret; \
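
With CPUIDLE_DRIVER_STATE_START gone, a driver that wants the generic polling state now asks for it explicitly. A rough sketch (not part of this patch; the driver name and registration details are made up):

#include <linux/cpuidle.h>
#include <linux/module.h>

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
};

static int __init example_idle_init(void)
{
	/* Fills state 0 with the polling state where ARCH_HAS_CPU_RELAX is set. */
	cpuidle_poll_state_init(&example_idle_driver);
	/* Real hardware idle states would be added from index 1 upwards. */
	example_idle_driver.state_count = 1;

	return cpuidle_register_driver(&example_idle_driver);
}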
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 119a3f9604b0..e74655d941b7 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,26 +18,35 @@
#ifdef CONFIG_CPUSETS
+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
}
-static inline int nr_cpusets(void)
-{
- /* jump label reference count + the top-level cpuset */
- return static_key_count(&cpusets_enabled_key.key) + 1;
-}
-
static inline void cpuset_inc(void)
{
+ static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
+ static_branch_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
@@ -115,7 +124,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +138,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_enabled_key))
return false;
return read_seqcount_retry(&current->mems_allowed_seq, seq);
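
The pattern the new pre-enable key protects is the usual begin/retry loop around an allocation; a sketch (not part of this patch, the allocation step is chosen only for illustration):

#include <linux/cpuset.h>
#include <linux/gfp.h>

static struct page *example_alloc(gfp_t gfp, unsigned int order)
{
	unsigned int cpuset_mems_cookie;
	struct page *page;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		page = alloc_pages(gfp, order);
	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

	return page;
}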
diff --git a/include/linux/cred.h b/include/linux/cred.h
index c728d515e5e2..099058e1178b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -31,7 +31,7 @@ struct group_info {
atomic_t usage;
int ngroups;
kgid_t gid[0];
-};
+} __randomize_layout;
/**
* get_group_info - Get a reference to a group info structure
@@ -145,7 +145,7 @@ struct cred {
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
-};
+} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 92e165d417a6..07eed95e10c7 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -193,7 +193,7 @@ struct crush_choose_arg {
struct crush_choose_arg_map {
#ifdef __KERNEL__
struct rb_node node;
- u64 choose_args_index;
+ s64 choose_args_index;
#endif
struct crush_choose_arg *args; /*!< replacement for each bucket
in the crushmap */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 794811875732..eb0bff6f1eab 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -87,34 +87,7 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
-
-/*
- * We use lowest available bit in exceptional entry for locking, one bit for
- * the entry size (PMD) and two more to tell us if the entry is a huge zero
- * page (HZP) or an empty entry that is just used for locking. In total four
- * special bits.
- *
- * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
- * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
- * block allocation.
- */
-#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
-#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
-
-static inline unsigned long dax_radix_sector(void *entry)
-{
- return (unsigned long)entry >> RADIX_DAX_SHIFT;
-}
-
-static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
-{
- return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
- ((unsigned long)sector << RADIX_DAX_SHIFT) |
- RADIX_DAX_ENTRY_LOCK);
-}
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
@@ -123,8 +96,6 @@ int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
@@ -139,21 +110,6 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
}
#endif
-#ifdef CONFIG_FS_DAX_PMD
-static inline unsigned int dax_radix_order(void *entry)
-{
- if ((unsigned long)entry & RADIX_DAX_PMD)
- return PMD_SHIFT - PAGE_SHIFT;
- return 0;
-}
-#else
-static inline unsigned int dax_radix_order(void *entry)
-{
- return 0;
-}
-#endif
-int dax_pfn_mkwrite(struct vm_fault *vmf);
-
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3f3ff4ccdc3f..aae1cdb76851 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -118,7 +118,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-};
+} __randomize_layout;
/*
* dentry->d_lock spinlock nesting subclasses:
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index aa86e6d8c1aa..b93efc8feecd 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -196,6 +196,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_unsafe(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
@@ -289,6 +297,14 @@ static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_ulong(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent,
u8 *value)
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6c220e4ebb6b..597294e0cc40 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -214,19 +214,6 @@ extern void devm_devfreq_unregister_notifier(struct device *dev,
extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
int index);
-/**
- * devfreq_update_stats() - update the last_status pointer in struct devfreq
- * @df: the devfreq instance whose status needs updating
- *
- * Governors are recommended to use this function along with last_status,
- * which allows other entities to reuse the last_status without affecting
- * the values fetched later by governors.
- */
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
-
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1473455d0341..4f2b3b2076c4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
-#ifdef CONFIG_PRINTK
-extern struct ratelimit_state dm_ratelimit_state;
-
-#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
-#else
-#define dm_ratelimit() 0
-#endif
+#define DM_RATELIMIT(pr_func, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&rs)) \
+ pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
+} while (0)
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMERR(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMWARN(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMINFO(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMDEBUG(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
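
Call sites keep their spelling; only the backing machinery changes, so each DM*_LIMIT user now gets its own per-call-site ratelimit state. A usage sketch (not part of this patch; DM_MSG_PREFIX and the function are hypothetical):

#define DM_MSG_PREFIX "example"

#include <linux/device-mapper.h>

static void example_report_io_error(int err)
{
	/* Rate-limited per call site rather than by the old global dm_ratelimit_state. */
	DMERR_LIMIT("I/O error %d", err);
}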
diff --git a/include/linux/device.h b/include/linux/device.h
index 723cd54b94da..c6f27207dbe8 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -375,7 +375,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
- * @shutdown: Called at shut-down time to quiesce the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -404,7 +404,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
- int (*shutdown)(struct device *dev);
+ int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
@@ -843,10 +843,11 @@ struct dev_links_info {
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
- * See Documentation/pinctrl.txt for details.
+ * See Documentation/driver-api/pinctl.rst for details.
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
+ * @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
@@ -1200,6 +1201,36 @@ struct device *device_create_with_groups(struct class *cls,
const char *fmt, ...);
extern void device_destroy(struct class *cls, dev_t devt);
+extern int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+
+static inline int __must_check device_add_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_add_groups(dev, groups);
+}
+
+static inline void device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_remove_groups(dev, groups);
+}
+
+extern int __must_check devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
+extern void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp);
+
/*
* Platform "fixup" functions - allow the platform to have their say
* about devices and actions that the general device layer doesn't
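
A sketch (not part of this patch) of the single-group convenience wrappers in use; the attribute group and the probe/remove callbacks below are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* real device_attribute entries would go here */
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static int example_probe(struct device *dev)
{
	return device_add_group(dev, &example_attr_group);
}

static void example_remove(struct device *dev)
{
	device_remove_group(dev, &example_attr_group);
}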
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 277ab9af9ac2..100cb4343763 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,7 @@
struct pts_fs_info;
+struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);
@@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *);
/* unlink */
void devpts_pty_kill(struct dentry *);
+/* in pty.c */
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);
+
+#else
+static inline int
+ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
+{
+ return -EIO;
+}
#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index a5195a7d6f77..171895072435 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -55,6 +55,7 @@ struct dma_fence_cb;
* of the time.
*
* DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
* DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
* DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
* implementer of the fence for its own purposes. Can be used in different
@@ -84,6 +85,7 @@ struct dma_fence {
enum dma_fence_flag_bits {
DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
@@ -336,6 +338,19 @@ dma_fence_is_signaled(struct dma_fence *fence)
}
/**
+ * __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence's seqno
+ * @f2: [in] the second fence's seqno from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+{
+ return (int)(f1 - f2) > 0;
+}
+
+/**
* dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
@@ -349,7 +364,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return (int)(f1->seqno - f2->seqno) > 0;
+ return __dma_fence_is_later(f1->seqno, f2->seqno);
}
/**
@@ -416,8 +431,8 @@ int dma_fence_get_status(struct dma_fence *fence);
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
{
- BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- BUG_ON(error >= 0 || error < -MAX_ERRNO);
+ WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+ WARN_ON(error >= 0 || error < -MAX_ERRNO);
fence->error = error;
}
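
Since __dma_fence_is_later() works on bare sequence numbers, it can be used where no struct dma_fence is at hand, as long as both numbers come from the same context. A sketch (not part of this patch; the helper is hypothetical):

#include <linux/dma-fence.h>

/* Has the hardware seqno reached or passed wait_seqno? Wrap-around safe. */
static bool example_seqno_passed(u32 hw_seqno, u32 wait_seqno)
{
	return hw_seqno == wait_seqno ||
	       __dma_fence_is_later(hw_seqno, wait_seqno);
}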
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..2189c79cde5d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -10,6 +10,7 @@
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
+#include <linux/mem_encrypt.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
@@ -157,16 +158,40 @@ static inline int is_device_dma_capable(struct device *dev)
* These three functions are only for dma allocator.
* Don't use them in device drivers.
*/
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+
#else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+
+static inline void *dma_alloc_from_global_coherent(ssize_t size,
+ dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size,
+ int *ret)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
@@ -481,7 +506,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
BUG_ON(!ops);
- if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +528,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
BUG_ON(!ops);
WARN_ON(irqs_disabled());
- if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
return;
if (!ops->free || !cpu_addr)
@@ -548,6 +573,12 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+static inline void dma_check_mask(struct device *dev, u64 mask)
+{
+ if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
+ dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+}
+
static inline int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -564,6 +595,9 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
+
+ dma_check_mask(dev, mask);
+
*dev->dma_mask = mask;
return 0;
}
@@ -583,6 +617,9 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (!dma_supported(dev, mask))
return -EIO;
+
+ dma_check_mask(dev, mask);
+
dev->coherent_dma_mask = mask;
return 0;
}
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 8ae0f45fafd6..cd75c173fd00 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -619,7 +619,6 @@ struct mem_ctl_info {
*/
struct device *pdev;
const char *mod_name;
- const char *mod_ver;
const char *ctl_name;
const char *dev_name;
void *pvt_info;
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 885f587a3555..915898759280 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -2,8 +2,7 @@
* Module: eeprom_93xx46
* platform description for 93xx46 EEPROMs.
*/
-
-struct gpio_desc;
+#include <linux/gpio/consumer.h>
struct eeprom_93xx46_platform_data {
unsigned char flags;
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8269bcb8ccf7..4102b85217d5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -985,7 +985,7 @@ static inline void efi_esrt_init(void) { }
extern int efi_config_parse_tables(void *config_tables, int count, int sz,
efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
-extern u32 efi_mem_type (unsigned long phys_addr);
+extern int efi_mem_type(unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
@@ -1020,6 +1020,28 @@ extern int efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
+/*
+ * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * @map: the start of efi memmap
+ * @desc_size: the size of space for each EFI memmap descriptor
+ * @n: the index of efi memmap descriptor
+ *
+ * EFI boot service provides the GetMemoryMap() function to get a copy of the
+ * current memory map which is an array of memory descriptors, each of
+ * which describes a contiguous block of memory. It also gets the size of the
+ * map, and the size of each descriptor, etc.
+ *
+ * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of
+ * each descriptor might not be equal to sizeof(efi_memory_desc_t),
+ * since efi_memory_desc_t may be extended in the future. Thus the OS
+ * MUST use the returned size of the descriptor to find the start of each
+ * efi_memory_desc_t in the memory map array. This should only be used
+ * during bootup since for_each_efi_memory_desc_xxx() is available after the
+ * kernel initializes the EFI subsystem to set up struct efi_memory_map.
+ */
+#define efi_early_memdesc_ptr(map, desc_size, n) \
+ (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
+
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
@@ -1091,6 +1113,8 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+extern bool efi_is_table_address(unsigned long phys_addr);
#else
static inline bool efi_enabled(int feature)
{
@@ -1104,6 +1128,11 @@ efi_capsule_pending(int *reset_type)
{
return false;
}
+
+static inline bool efi_is_table_address(unsigned long phys_addr)
+{
+ return false;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
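
A sketch (not part of this patch) of walking an early memory map with efi_early_memdesc_ptr(); the map pointer and sizes are assumed to come straight from the firmware's GetMemoryMap() output:

#include <linux/efi.h>

static u64 example_count_conventional_pages(void *memory_map,
					    unsigned long map_size,
					    unsigned long desc_size)
{
	efi_memory_desc_t *md;
	unsigned long i;
	u64 pages = 0;

	for (i = 0; i < map_size / desc_size; i++) {
		md = efi_early_memdesc_ptr(memory_map, desc_size, i);
		if (md->type == EFI_CONVENTIONAL_MEMORY)
			pages += md->num_pages;
	}

	return pages;
}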
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 9e0d444ac88d..f746bd8fe4d0 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,18 +1,12 @@
+/*
+ * See Documentation/errseq.rst and lib/errseq.c
+ */
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
-/* See lib/errseq.c for more info */
-
typedef u32 errseq_t;
-errseq_t __errseq_set(errseq_t *eseq, int err);
-static inline void errseq_set(errseq_t *eseq, int err)
-{
- /* Optimize for the common case of no error */
- if (unlikely(err))
- __errseq_set(eseq, err);
-}
-
+errseq_t errseq_set(errseq_t *eseq, int err);
errseq_t errseq_sample(errseq_t *eseq);
int errseq_check(errseq_t *eseq, errseq_t since);
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
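
Typical errseq_t usage, for orientation (a sketch, not part of this patch; the structure is made up): a writer records errors, a reader keeps a cursor and asks whether anything new was recorded since.

#include <linux/errseq.h>
#include <linux/types.h>

struct example_log {
	errseq_t wb_err;
};

static void example_record_error(struct example_log *log, int err)
{
	errseq_set(&log->wb_err, err);
}

static int example_new_error_since(struct example_log *log, errseq_t since)
{
	/* returns the latest error if one was recorded after 'since', else 0 */
	return errseq_check(&log->wb_err, since);
}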
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 83cc9863444b..4587a4c36923 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -137,6 +137,17 @@ struct ethtool_link_ksettings {
__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
/**
+ * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bound checking)
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
+ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
* ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
* @ptr : pointer to struct ethtool_link_ksettings
* @name : one of supported/advertising/lp_advertising
@@ -374,5 +385,9 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ int (*get_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ int (*set_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
};
#endif /* _LINUX_ETHTOOL_H */
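
Usage sketch for the new delete helper (not part of this patch; the function and the policy it applies are hypothetical):

#include <linux/ethtool.h>

static void example_trim_advertising(struct ethtool_link_ksettings *ks,
				     bool fibre_only)
{
	/* Drop a copper-only mode when the port is fibre. */
	if (fibre_only)
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     1000baseT_Full);
}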
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7e206a9f88db..744d60ca80c3 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,5 @@
/*
- * External connector (extcon) class driver
+ * External Connector (extcon) framework
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -20,8 +20,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
-*/
+ */
#ifndef __LINUX_EXTCON_H__
#define __LINUX_EXTCON_H__
@@ -93,7 +92,7 @@
#define EXTCON_NUM 63
/*
- * Define the property of supported external connectors.
+ * Define the properties of supported external connectors.
*
* When adding the new extcon property, they *must* have
* the type/value/default information. Also, you *have to*
@@ -176,44 +175,42 @@ struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-/*
- * Following APIs are for notifiers or configurations.
- * Notifiers are the external port and connection devices.
- */
+/* Following APIs register/unregister the extcon device. */
extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
+ struct extcon_dev *edev);
extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+ struct extcon_dev *edev);
-/*
- * Following APIs control the memory of extcon device.
- */
+/* Following APIs allocate/free the memory of the extcon device. */
extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+/* Synchronize the state and property value for each external connector. */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
/*
- * get/set_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
+ * Following APIs get/set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
*/
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
+ bool state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
-/*
- * Synchronize the state and property data for a specific external connector.
- */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+ bool state);
/*
- * get/set_property access the property value of each external connector.
- * They are used to access the property of each cable based on the property id.
+ * Following APIs get/set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_get/set_property_capability() set the capability of the property
+ * for each external connector. They are used to set the capability of the
+ * property of each external connector based on the id and property.
*/
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -224,28 +221,24 @@ extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-
-/*
- * get/set_property_capability set the capability of the property for each
- * external connector. They are used to set the capability of the property
- * of each external connector based on the id and property.
- */
extern int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor the status change of the external connectors.
+ * Following APIs register the notifier block in order to detect
+ * the change of both state and property value for each external connector.
+ *
* extcon_register_notifier(*edev, id, *nb) : Register a notifier block
* for specific external connector of the extcon.
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -265,16 +258,15 @@ extern void devm_extcon_unregister_notifier_all(struct device *dev,
struct notifier_block *nb);
/*
- * Following API get the extcon device from devicetree.
- * This function use phandle of devicetree to get extcon device directly.
+ * Following APIs get the extcon_dev from devicetree or by the extcon name.
*/
+extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
-/* Following API to get information of extcon device */
+/* Following API gets the name of the extcon device. */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -284,13 +276,13 @@ static inline int extcon_dev_register(struct extcon_dev *edev)
static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
+ struct extcon_dev *edev)
{
return -EINVAL;
}
static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
+ struct extcon_dev *edev) { }
static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
@@ -300,7 +292,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -314,13 +306,13 @@ static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
@@ -331,52 +323,45 @@ static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value *prop_val)
+ unsigned int prop,
+ union extcon_property_value *prop_val)
{
return 0;
}
static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
-static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
-{
- return NULL;
-}
-
static inline int extcon_register_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
@@ -392,8 +377,13 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
- int index)
+ int index)
{
return ERR_PTR(-ENODEV);
}
@@ -411,26 +401,14 @@ struct extcon_specific_cable_nb {
};
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
+ const char *extcon_name, const char *cable_name,
+ struct notifier_block *nb)
{
return -EINVAL;
}
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
- *obj)
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
return -EINVAL;
}
-
-static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
-{
- return extcon_get_state(edev, id);
-}
-
-static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
-{
- return extcon_set_state_sync(edev, id, cable_state);
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bfef1e5734f8..d29e58fde364 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -711,7 +711,24 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+
+/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+ * same cpu context. Further for best results no more than a single map
+ * for the do_redirect/do_flush pair should be used. This limitation is
+ * because we only track one map and force a flush when the map changes.
+ * This does not appear to be a real limitation for existing software.
+ */
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *prog);
+int xdp_do_redirect(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct bpf_prog *prog);
+void xdp_do_flush_map(void);
+
void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_redirect(u32 ifindex);
+
+struct sock *do_sk_redirect_map(void);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
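
The contract in the comment above, sketched from a driver's point of view (not part of this patch; everything except the two helpers is hypothetical): call xdp_do_redirect() per redirected buffer inside the NAPI poll, then flush once before returning, all on the same CPU.

#include <linux/filter.h>
#include <linux/netdevice.h>

/* Called for a buffer whose XDP program verdict was XDP_REDIRECT. */
static int example_handle_redirect(struct net_device *dev,
				   struct xdp_buff *xdp,
				   struct bpf_prog *prog)
{
	return xdp_do_redirect(dev, xdp, prog);	/* non-zero: caller drops */
}

/* Called once at the end of the NAPI poll, on the same CPU. */
static void example_poll_done(void)
{
	xdp_do_flush_map();
}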
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
index a5f0aa5c2a8d..3dc8a1b2db7b 100644
--- a/include/linux/fmc.h
+++ b/include/linux/fmc.h
@@ -132,6 +132,8 @@ struct fmc_operations {
uint32_t (*read32)(struct fmc_device *fmc, int offset);
void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
+ int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d,
+ void *gw, unsigned long len);
int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
char *name, int flags);
@@ -144,6 +146,8 @@ struct fmc_operations {
};
/* Prefer this helper rather than calling of fmc->reprogram directly */
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry);
extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
int sdb_entry);
@@ -180,6 +184,9 @@ struct fmc_device {
uint32_t device_id; /* Filled by the device */
char *mezzanine_name; /* Defaults to ``fmc'' */
void *mezzanine_data;
+
+ struct dentry *dbg_dir;
+ struct dentry *dbg_sdb_dump;
};
#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
@@ -217,14 +224,23 @@ static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
dev_set_drvdata(&fmc->dev, data);
}
-/* The 4 access points */
+struct fmc_gateware {
+ void *bitstream;
+ unsigned long len;
+};
+
+/* The 5 access points */
extern int fmc_driver_register(struct fmc_driver *drv);
extern void fmc_driver_unregister(struct fmc_driver *drv);
extern int fmc_device_register(struct fmc_device *tdev);
+extern int fmc_device_register_gw(struct fmc_device *tdev,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister(struct fmc_device *tdev);
-/* Two more for device sets, all driven by the same FPGA */
+/* Three more for device sets, all driven by the same FPGA */
extern int fmc_device_register_n(struct fmc_device **devs, int n);
+extern int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
/* Internal cross-calls between files; not exported to other modules */
@@ -232,6 +248,23 @@ extern int fmc_match(struct device *dev, struct device_driver *drv);
extern int fmc_fill_id_info(struct fmc_device *fmc);
extern void fmc_free_id_info(struct fmc_device *fmc);
extern void fmc_dump_eeprom(const struct fmc_device *fmc);
-extern void fmc_dump_sdb(const struct fmc_device *fmc);
+
+/* helpers for FMC operations */
+extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags);
+extern void fmc_irq_free(struct fmc_device *fmc);
+extern void fmc_irq_ack(struct fmc_device *fmc);
+extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
+extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio,
+ int ngpio);
+extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l);
+extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l);
+
#endif /* __LINUX_FMC_H__ */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index b4ac24c4411d..bfa14bc023fb 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -67,10 +67,14 @@ enum fpga_mgr_states {
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
* FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
+#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
+#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
* struct fpga_image_info - information specific to a FPGA image
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7b5d6816542b..c57002ae6520 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -72,6 +72,8 @@ extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+typedef __kernel_rwf_t rwf_t;
+
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
@@ -296,7 +298,7 @@ struct kiocb {
void *private;
int ki_flags;
enum rw_hint ki_hint;
-};
+} __randomize_layout;
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
@@ -404,7 +406,7 @@ struct address_space {
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
errseq_t wb_err;
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
@@ -447,7 +449,7 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-};
+} __randomize_layout;
/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -666,7 +668,7 @@ struct inode {
#endif
void *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
static inline unsigned int i_blocksize(const struct inode *node)
{
@@ -883,7 +885,8 @@ struct file {
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
@@ -906,9 +909,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
#define FL_POSIX 1
@@ -999,7 +1002,6 @@ struct file_lock {
unsigned char fl_type;
unsigned int fl_pid;
int fl_link_cpu; /* what cpu's list is this on? */
- struct pid *fl_nspid;
wait_queue_head_t fl_wait;
struct file *fl_file;
loff_t fl_start;
@@ -1020,7 +1022,7 @@ struct file_lock {
int state; /* state of grant or error if -ve */
} afs;
} fl_u;
-};
+} __randomize_layout;
struct file_lock_context {
spinlock_t flc_lock;
@@ -1267,8 +1269,6 @@ extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
-struct mm_struct;
-
/*
* Umount options
*/
@@ -1412,7 +1412,7 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
-};
+} __randomize_layout;
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
@@ -1698,7 +1698,7 @@ struct file_operations {
u64);
ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
u64);
-};
+} __randomize_layout;
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
@@ -1757,9 +1757,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
+ unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
+ unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
@@ -2470,9 +2470,13 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
#endif
/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE 255
+#define CHRDEV_MAJOR_MAX 512
/* Marks the bottom of the first segment of free char majors */
#define CHRDEV_MAJOR_DYN_END 234
+/* Marks the top and bottom of the second segment of free char majors */
+#define CHRDEV_MAJOR_DYN_EXT_START 511
+#define CHRDEV_MAJOR_DYN_EXT_END 384
+
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
@@ -2499,14 +2503,14 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_HASH_SIZE 255
+#define BLKDEV_MAJOR_MAX 512
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern void blkdev_show(struct seq_file *,off_t);
#else
-#define BLKDEV_MAJOR_HASH_SIZE 0
+#define BLKDEV_MAJOR_MAX 0
#endif
extern void init_special_inode(struct inode *, umode_t, dev_t);
@@ -2538,12 +2542,19 @@ extern int invalidate_inode_pages2_range(struct address_space *mapping,
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
+extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
+ loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2552,12 +2563,19 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping,
extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
-
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+
+extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
+ loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+static inline int file_write_and_wait(struct file *file)
+{
+ return file_write_and_wait_range(file, 0, LLONG_MAX);
+}
+
/**
* filemap_set_wb_err - set a writeback error on an address_space
* @mapping: mapping in which to set writeback error
@@ -2571,8 +2589,6 @@ extern int __must_check file_write_and_wait_range(struct file *file,
* When a writeback error occurs, most filesystems will want to call
* filemap_set_wb_err to record the error in the mapping so that it will be
* automatically reported whenever fsync is called on the file.
- *
- * FIXME: mention FS_* flag here?
*/
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
@@ -2830,6 +2846,7 @@ static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
@@ -2873,9 +2890,9 @@ extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -3142,7 +3159,7 @@ static inline int iocb_flags(struct file *file)
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
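
A sketch (not part of this patch) of an fsync-style method built on the new wrappers: write back and wait on the requested range, then report and advance the per-file writeback error cursor.

#include <linux/fs.h>

static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	int err = file_write_and_wait_range(file, start, end);

	if (err)
		return err;

	return file_check_and_advance_wb_err(file);
}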
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 0efc3e62843a..7a026240cbb1 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -12,7 +12,7 @@ struct fs_struct {
int umask;
int in_exec;
struct path root, pwd;
-};
+} __randomize_layout;
extern struct kmem_cache *fs_cachep;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 115bb81912cc..f4ff47d4a893 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -143,15 +143,6 @@ struct fscache_cookie_def {
void (*mark_page_cached)(void *cookie_netfs_data,
struct address_space *mapping,
struct page *page);
-
- /* indicate the cookie is no longer cached
- * - this function is called when the backing store currently caching
- * a cookie is removed
- * - the netfs should use this to clean up any markers indicating
- * cached pages
- * - this is mandatory for any object that may have data
- */
- void (*now_uncached)(void *cookie_netfs_data);
};
/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 5857390ac35a..6383115e9d2c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -145,8 +145,8 @@ enum {
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
- struct ftrace_hash *notrace_hash;
- struct ftrace_hash *filter_hash;
+ struct ftrace_hash __rcu *notrace_hash;
+ struct ftrace_hash __rcu *filter_hash;
struct mutex regex_lock;
};
@@ -168,7 +168,7 @@ static inline void ftrace_free_init_mem(void) { }
*/
struct ftrace_ops {
ftrace_func_t func;
- struct ftrace_ops *next;
+ struct ftrace_ops __rcu *next;
unsigned long flags;
void *private;
ftrace_func_t saved_func;
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 7c5b694864cd..f36bfd26f998 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -54,7 +54,6 @@ union futex_key {
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
-extern void exit_pi_state_list(struct task_struct *curr);
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
@@ -64,8 +63,14 @@ extern int futex_cmpxchg_enabled;
static inline void exit_robust_list(struct task_struct *curr)
{
}
+#endif
+
+#ifdef CONFIG_FUTEX_PI
+extern void exit_pi_state_list(struct task_struct *curr);
+#else
static inline void exit_pi_state_list(struct task_struct *curr)
{
}
#endif
+
#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 50893a1646cf..0c35b6caf0f6 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -14,20 +14,9 @@
#include <linux/types.h>
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
- FWNODE_ACPI_DATA,
- FWNODE_ACPI_STATIC,
- FWNODE_PDATA,
- FWNODE_IRQCHIP
-};
-
struct fwnode_operations;
struct fwnode_handle {
- enum fwnode_type type;
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
};
@@ -44,6 +33,20 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+#define NR_FWNODE_REFERENCE_ARGS 8
+
+/**
+ * struct fwnode_reference_args - Fwnode reference with additional arguments
+ * @fwnode: A reference to the base fwnode
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments on the fwnode
+ */
+struct fwnode_reference_args {
+ struct fwnode_handle *fwnode;
+ unsigned int nargs;
+ unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+};
+
/**
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
@@ -57,6 +60,7 @@ struct fwnode_endpoint {
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
+ * @get_reference_args: Return a reference pointed to by a property, with args
* @graph_get_next_endpoint: Return an endpoint node in an iteration.
* @graph_get_remote_endpoint: Return the remote endpoint node of a local
* endpoint node.
@@ -66,30 +70,36 @@ struct fwnode_endpoint {
struct fwnode_operations {
void (*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
- bool (*device_is_available)(struct fwnode_handle *fwnode);
- bool (*property_present)(struct fwnode_handle *fwnode,
+ bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
- int (*property_read_int_array)(struct fwnode_handle *fwnode,
+ int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval);
- int (*property_read_string_array)(struct fwnode_handle *fwnode_handle,
- const char *propname,
- const char **val, size_t nval);
- struct fwnode_handle *(*get_parent)(struct fwnode_handle *fwnode);
+ int
+ (*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
+ const char *propname, const char **val,
+ size_t nval);
+ struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
- (*get_next_child_node)(struct fwnode_handle *fwnode,
+ (*get_next_child_node)(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *
- (*get_named_child_node)(struct fwnode_handle *fwnode, const char *name);
+ (*get_named_child_node)(const struct fwnode_handle *fwnode,
+ const char *name);
+ int (*get_reference_args)(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
struct fwnode_handle *
- (*graph_get_next_endpoint)(struct fwnode_handle *fwnode,
+ (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev);
struct fwnode_handle *
- (*graph_get_remote_endpoint)(struct fwnode_handle *fwnode);
+ (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
- int (*graph_parse_endpoint)(struct fwnode_handle *fwnode,
+ int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
};
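
A minimal sketch of how a consumer might use the new fwnode_reference_args structure introduced above. The fwnode_property_get_reference_args() wrapper and the "pwms" property name are assumptions for illustration and are not part of this header change.

	#include <linux/fwnode.h>
	#include <linux/property.h>
	#include <linux/printk.h>

	static int example_get_pwm_ref(struct fwnode_handle *fwnode)
	{
		struct fwnode_reference_args args;
		int ret;

		/* Look up the first reference in "pwms", expecting one argument. */
		ret = fwnode_property_get_reference_args(fwnode, "pwms", NULL,
							 1, 0, &args);
		if (ret)
			return ret;

		pr_info("referenced node with %u args, arg[0]=%u\n",
			args.nargs, args.nargs ? args.args[0] : 0);

		fwnode_handle_put(args.fwnode);	/* drop the reference we were given */
		return 0;
	}
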
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385903d4..6dfec4d638df 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -38,12 +38,13 @@ struct device_node;
struct gen_pool;
/**
- * Allocation callback function type definition
+ * typedef genpool_algo_t: Allocation callback function type definition
* @map: Pointer to bitmap
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
- * @data: optional additional data used by @genpool_algo_t
+ * @data: optional additional data used by the callback
+ * @pool: the pool being allocated from
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index af20369ec8e7..c97f8325e8bf 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -180,8 +180,27 @@ struct gpio_chip {
* If CONFIG_OF is enabled, then all GPIO controllers described in the
* device tree automatically may have an OF translation
*/
+
+ /**
+ * @of_node:
+ *
+ * Pointer to a device tree node representing this GPIO controller.
+ */
struct device_node *of_node;
- int of_gpio_n_cells;
+
+ /**
+ * @of_gpio_n_cells:
+ *
+ * Number of cells used to form the GPIO specifier.
+ */
+ unsigned int of_gpio_n_cells;
+
+ /**
+ * @of_xlate:
+ *
+ * Callback to translate a device tree GPIO specifier into a chip-
+ * relative GPIO number and flags.
+ */
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
#endif
@@ -327,11 +346,10 @@ int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
+ * @node: list for maintaining set of pin ranges, used internally
* @pctldev: pinctrl device which handles corresponding pins
* @range: actual range of pins controlled by a gpio controller
*/
-
struct gpio_pin_range {
struct list_head node;
struct pinctrl_dev *pctldev;
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 6e76b16fcade..ba4ccfd900f9 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -60,11 +60,14 @@ struct gpiod_lookup_table {
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
static inline
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
+static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
#endif
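
A board-code sketch of the new gpiod_add_lookup_tables() helper, which registers several lookup tables in one call. The chip labels, pin numbers, and consumer names below are made up for illustration.

	#include <linux/gpio/machine.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static struct gpiod_lookup_table board_gpios_1 = {
		.dev_id = "i2c-gpio.0",
		.table = {
			GPIO_LOOKUP("gpio-bank0", 4, "sda", GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP("gpio-bank0", 5, "scl", GPIO_ACTIVE_HIGH),
			{ },
		},
	};

	static struct gpiod_lookup_table board_gpios_2 = {
		.dev_id = "leds-gpio",
		.table = {
			GPIO_LOOKUP("gpio-bank1", 0, NULL, GPIO_ACTIVE_LOW),
			{ },
		},
	};

	static struct gpiod_lookup_table *board_gpio_tables[] = {
		&board_gpios_1,
		&board_gpios_2,
	};

	static void __init board_register_gpios(void)
	{
		/* One call instead of a gpiod_add_lookup_table() per table. */
		gpiod_add_lookup_tables(board_gpio_tables,
					ARRAY_SIZE(board_gpio_tables));
	}
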
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 5006f9b5d837..ab05a86269dc 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -173,6 +173,7 @@ struct hid_item {
#define HID_UP_LOGIVENDOR3 0xff430000
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
+#define HID_UP_ASUSVENDOR 0xff310000
#define HID_USAGE 0x0000ffff
@@ -292,6 +293,7 @@ struct hid_item {
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
* HID report types --- Ouch! HID spec says 1 2 3!
*/
@@ -363,6 +365,12 @@ struct hid_item {
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
/*
+ * HID protocol status
+ */
+#define HID_REPORT_PROTOCOL 1
+#define HID_BOOT_PROTOCOL 0
+
+/*
* This is the global environment of the parser. This information is
* persistent for main-items. The global environment can be saved and
* restored with PUSH/POP statements.
@@ -526,7 +534,6 @@ struct hid_device { /* device report descriptor */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
struct work_struct led_work; /* delayed LED worker */
- struct semaphore driver_lock; /* protects the current driver, except during input */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
@@ -542,16 +549,18 @@ struct hid_device { /* device report descriptor */
* battery is non-NULL.
*/
struct power_supply *battery;
+ __s32 battery_capacity;
__s32 battery_min;
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ bool battery_reported;
#endif
unsigned int status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
- bool io_started; /* Protected by driver_lock. If IO has started */
+ bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
void *hiddev; /* The hiddev structure */
@@ -777,6 +786,17 @@ struct hid_ll_driver {
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
};
+extern struct hid_ll_driver i2c_hid_ll_driver;
+extern struct hid_ll_driver hidp_hid_driver;
+extern struct hid_ll_driver uhid_hid_driver;
+extern struct hid_ll_driver usb_hid_driver;
+
+static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
+ struct hid_ll_driver *driver)
+{
+ return hdev->ll_driver == driver;
+}
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
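
A hypothetical probe snippet showing how a driver could use the newly exported low-level driver symbols together with hid_is_using_ll_driver() to branch on the transport. The driver and function names are placeholders.

	#include <linux/hid.h>

	static int example_hid_probe(struct hid_device *hdev,
				     const struct hid_device_id *id)
	{
		/*
		 * With usb_hid_driver and i2c_hid_ll_driver now declared in
		 * hid.h, a driver can adapt its behaviour to the transport.
		 */
		if (hid_is_using_ll_driver(hdev, &usb_hid_driver))
			hid_info(hdev, "USB transport\n");
		else if (hid_is_using_ll_driver(hdev, &i2c_hid_ll_driver))
			hid_info(hdev, "I2C transport\n");

		return hid_parse(hdev) ?:
		       hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}
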
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b7d7bbec74e0..e4bbf7dc9932 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -124,10 +124,7 @@ struct hv_ring_buffer_info {
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
- u32 ring_data_startoffset;
- u32 priv_write_index;
u32 priv_read_index;
- u32 cached_read_index;
};
/*
@@ -180,19 +177,6 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
-static inline u32 hv_get_cached_bytes_to_write(
- const struct hv_ring_buffer_info *rbi)
-{
- u32 read_loc, write_loc, dsize, write;
-
- dsize = rbi->ring_datasize;
- read_loc = rbi->cached_read_index;
- write_loc = rbi->ring_buffer->write_index;
-
- write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- return write;
-}
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -895,6 +879,8 @@ struct vmbus_channel {
*/
enum hv_numa_policy affinity_policy;
+ bool probe_done;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1030,13 +1016,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
- void *buffer,
- u32 bufferLen,
- u64 requestid,
- enum vmbus_packet_type type,
- u32 flags);
-
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -1044,20 +1023,6 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
-extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid,
- u32 flags);
-
-extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *mpb,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1474,55 +1439,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
}
/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
-/*
* Mask off host interrupt callback notifications
*/
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 00ca5b86a753..d501d3956f13 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
-#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED (1<<8)
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
diff --git a/include/linux/idr.h b/include/linux/idr.h
index bf70b3ef0a07..7c3a365f7e12 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -80,19 +80,75 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
*/
void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
+
+int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
+ unsigned long start, unsigned long end, gfp_t gfp,
+ bool ext);
+
+/**
+ * idr_alloc - allocate an id
+ * @idr: idr handle
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
+ *
+ * Allocates an unused ID in the range [start, end). Returns -ENOSPC
+ * if there are no unused IDs in that range.
+ *
+ * Note that @end is treated as max when <= 0. This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * Simultaneous modifications to the @idr are not allowed and should be
+ * prevented by the user, usually with a lock. idr_alloc() may be called
+ * concurrently with read-only accesses to the @idr, such as idr_find() and
+ * idr_for_each_entry().
+ */
+static inline int idr_alloc(struct idr *idr, void *ptr,
+ int start, int end, gfp_t gfp)
+{
+ unsigned long id;
+ int ret;
+
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+
+ ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
+
+ if (ret)
+ return ret;
+
+ return id;
+}
+
+static inline int idr_alloc_ext(struct idr *idr, void *ptr,
+ unsigned long *index,
+ unsigned long start,
+ unsigned long end,
+ gfp_t gfp)
+{
+ return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
+}
+
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
+void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
void *idr_replace(struct idr *, void *, int id);
+void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove(struct idr *idr, int id)
+static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
{
return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}
+static inline void *idr_remove(struct idr *idr, int id)
+{
+ return idr_remove_ext(idr, id);
+}
+
static inline void idr_init(struct idr *idr)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
@@ -128,11 +184,16 @@ static inline void idr_preload_end(void)
* This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed.
*/
-static inline void *idr_find(const struct idr *idr, int id)
+static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
{
return radix_tree_lookup(&idr->idr_rt, id);
}
+static inline void *idr_find(const struct idr *idr, int id)
+{
+ return idr_find_ext(idr, id);
+}
+
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
* @idr: idr handle
@@ -145,6 +206,8 @@ static inline void *idr_find(const struct idr *idr, int id)
*/
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ext(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
/**
* idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
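
A usage sketch of the two allocation paths added above: the classic int-range idr_alloc() and the extended idr_alloc_ext(), which returns the id through a pointer. The lock, the payload pointer, and the chosen id range are placeholders.

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);
	static DEFINE_SPINLOCK(example_lock);

	static int example_store(void *payload)
	{
		unsigned long big_id;
		int id, ret;

		idr_preload(GFP_KERNEL);
		spin_lock(&example_lock);

		/* Classic path: returns the new id, or -errno on failure. */
		id = idr_alloc(&example_idr, payload, 1, 0, GFP_NOWAIT);

		/* Extended path: the id comes back through @big_id instead. */
		ret = idr_alloc_ext(&example_idr, payload, &big_id,
				    1, 1UL << 20, GFP_NOWAIT);

		spin_unlock(&example_lock);
		idr_preload_end();

		return id < 0 ? id : ret;
	}
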
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 97caf1821de8..f8231854b5d6 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -118,7 +118,8 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct ip_msfilter __user *optval, int __user *optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct group_filter __user *optval, int __user *optlen);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
+extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
+ int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 497f2b3a5a62..7b0fa8b5c120 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
};
+struct st_sensor_sim {
+ u8 addr;
+ u8 value;
+};
+
/**
* struct st_sensor_bdu - ST sensor device block data update
* @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
@@ -213,6 +219,7 @@ struct st_sensor_settings {
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
+ struct st_sensor_sim sim;
bool multi_read_bit;
unsigned int bootime;
};
@@ -325,4 +332,16 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf);
+#ifdef CONFIG_OF
+void st_sensors_of_name_probe(struct device *dev,
+ const struct of_device_id *match,
+ char *name, int len);
+#else
+static inline void st_sensors_of_name_probe(struct device *dev,
+ const struct of_device_id *match,
+ char *name, int len)
+{
+}
+#endif
+
#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 254de3c7dde8..0a2c25e06d1f 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -18,16 +18,6 @@
void st_sensors_i2c_configure(struct iio_dev *indio_dev,
struct i2c_client *client, struct st_sensor_data *sdata);
-#ifdef CONFIG_OF
-void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match);
-#else
-static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match)
-{
-}
-#endif
-
#ifdef CONFIG_ACPI
int st_sensors_match_acpi_device(struct device *dev);
#else
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d68bec297a45..c380daa40c0e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly [INTERN] mark the current trigger immutable
+ * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index fa7d786ed99e..d68add80ab86 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -55,10 +55,24 @@
#define TIM9_CH1 "tim9_ch1"
#define TIM9_CH2 "tim9_ch2"
+#define TIM10_OC1 "tim10_oc1"
+
+#define TIM11_OC1 "tim11_oc1"
+
#define TIM12_TRGO "tim12_trgo"
#define TIM12_CH1 "tim12_ch1"
#define TIM12_CH2 "tim12_ch2"
+#define TIM13_OC1 "tim13_oc1"
+
+#define TIM14_OC1 "tim14_oc1"
+
+#define TIM15_TRGO "tim15_trgo"
+
+#define TIM16_OC1 "tim16_oc1"
+
+#define TIM17_OC1 "tim17_oc1"
+
bool is_stm32_timer_trigger(struct iio_trigger *trig);
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index ea08302f2d7b..7142d8d6e470 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
- * @indio_dev - IIO device structure containing the device
- * @trig - trigger to assign to device
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
*
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 65da430e260f..ee251c585854 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -25,6 +25,13 @@ struct inet_diag_handler {
struct inet_diag_msg *r,
void *info);
+ int (*idiag_get_aux)(struct sock *sk,
+ bool net_admin,
+ struct sk_buff *skb);
+
+ size_t (*idiag_get_aux_size)(struct sock *sk,
+ bool net_admin);
+
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2f6707e9fc0..0e849715e5be 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,17 +126,11 @@ extern struct group_info init_groups;
#endif
#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_TREE_PREEMPT() \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- INIT_TASK_RCU_TREE_PREEMPT()
+ .rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2fddddb0d60..59ba11661b6e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include <asm/sections.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -726,7 +727,6 @@ extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
@@ -734,16 +734,4 @@ extern int arch_early_irq_init(void);
#define __softirq_entry \
__attribute__((__section__(".softirqentry.text")))
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-/* Limits of softirq entrypoints */
-extern char __softirqentry_text_start[];
-extern char __softirqentry_text_end[];
-
-#else
-#define __irq_entry
-#define __softirq_entry
-#endif
-
#endif
diff --git a/include/linux/io.h b/include/linux/io.h
index 2195d9ea4aaa..32e30e8fb9db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -157,6 +157,8 @@ enum {
MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
MEMREMAP_WC = 1 << 2,
+ MEMREMAP_ENC = 1 << 3,
+ MEMREMAP_DEC = 1 << 4,
};
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2cb54adc4a33..176f7569d874 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -240,7 +240,7 @@ struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
- struct device dev;
+ struct device *dev;
};
int iommu_device_register(struct iommu_device *iommu);
@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode;
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return (struct iommu_device *)dev_get_drvdata(dev);
+}
+
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return NULL;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 5591f055e13f..fadd579d577d 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -23,6 +23,6 @@ struct kern_ipc_perm {
struct rcu_head rcu;
atomic_t refcount;
-} ____cacheline_aligned_in_smp;
+} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 848e5796400e..65327ee0936b 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -61,7 +61,7 @@ struct ipc_namespace {
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index e1b442996f81..ac2da4e11d5e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -128,6 +128,7 @@ struct inet6_skb_parm {
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
+#define IP6SKB_JUMBOGRAM 128
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,6 +153,21 @@ static inline int inet6_iif(const struct sk_buff *skb)
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
+{
+ return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
+}
+
+/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline int inet6_sdif(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return IP6CB(skb)->iif;
+#endif
+ return 0;
+}
+
/* can not be used in TCP layer after tcp_v6_fill_cb */
static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
{
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 00db35b61e9e..b99a784635ff 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -388,7 +388,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_mask_ack: ack and mask an interrupt source
* @irq_unmask: unmask an interrupt source
* @irq_eoi: end of interrupt
- * @irq_set_affinity: set the CPU affinity on SMP machines
+ * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force
+ * argument is true, it tells the driver to
+ * unconditionally apply the affinity setting. Sanity
+ * checks against the supplied affinity mask are not
+ * required. This is used for CPU hotplug where the
+ * target CPU is not yet set in the cpu_online_mask.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -563,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
+extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
@@ -776,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->effective_affinity;
+ if (!cpumask_empty(d->common->effective_affinity))
+ return d->common->effective_affinity;
+
+ return d->common->affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
const struct cpumask *m)
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
new file mode 100644
index 000000000000..0380d899b955
--- /dev/null
+++ b/include/linux/irq_sim.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_IRQ_SIM_H
+#define _LINUX_IRQ_SIM_H
+/*
+ * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/irq_work.h>
+#include <linux/device.h>
+
+/*
+ * Provides a framework for allocating simulated interrupts which can be
+ * requested like normal irqs and enqueued from process context.
+ */
+
+struct irq_sim_work_ctx {
+ struct irq_work work;
+ int irq;
+};
+
+struct irq_sim_irq_ctx {
+ int irqnum;
+ bool enabled;
+};
+
+struct irq_sim {
+ struct irq_sim_work_ctx work_ctx;
+ int irq_base;
+ unsigned int irq_count;
+ struct irq_sim_irq_ctx *irqs;
+};
+
+int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
+int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
+ unsigned int num_irqs);
+void irq_sim_fini(struct irq_sim *sim);
+void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
+int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
+
+#endif /* _LINUX_IRQ_SIM_H */
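
A sketch of a driver using the new interrupt simulator: allocate a handful of simulated lines, request one like a normal irq, and fire it from process context. The device, handler, and structure names are illustrative only.

	#include <linux/irq_sim.h>
	#include <linux/interrupt.h>

	struct example_priv {
		struct irq_sim sim;
	};

	static irqreturn_t example_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_setup(struct device *dev, struct example_priv *priv)
	{
		int ret;

		/* Allocate 4 simulated interrupt lines, released with the device. */
		ret = devm_irq_sim_init(dev, &priv->sim, 4);
		if (ret < 0)
			return ret;

		/* Consumers request the mapped number like any other irq. */
		return devm_request_irq(dev, irq_sim_irqnum(&priv->sim, 0),
					example_handler, 0, dev_name(dev), priv);
	}

	static void example_trigger(struct example_priv *priv)
	{
		/* Fire line 0 from process context. */
		irq_sim_fire(&priv->sim, 0);
	}
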
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index c647b0547bcd..0a83b4379f34 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -27,6 +27,8 @@ struct gic_kvm_info {
unsigned int maint_irq;
/* Virtual control interface */
struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
};
const struct gic_kvm_info *gic_get_kvm_info(void);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6a1f87ff94e2..1ea576c8126f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -204,6 +204,7 @@
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GIC_V3_REDIST_SIZE 0x20000
@@ -212,6 +213,69 @@
#define LPI_PROP_ENABLED (1 << 0)
/*
+ * Re-Distributor registers, offsets from VLPI_base
+ */
+#define GICR_VPROPBASER 0x0070
+
+#define GICR_VPROPBASER_IDBITS_MASK 0x1f
+
+#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
+
+#define GICR_VPROPBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
+#define GICR_VPROPBASER_CACHEABILITY_MASK \
+ GICR_VPROPBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPROPBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
+
+#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
+#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
+#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
+#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
+#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
+#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
+
+#define GICR_VPENDBASER 0x0078
+
+#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_VPENDBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
+#define GICR_VPENDBASER_CACHEABILITY_MASK \
+ GICR_VPENDBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPENDBASER_NonShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+
+#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
+#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
+#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
+#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
+#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
+#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
+
+#define GICR_VPENDBASER_Dirty (1ULL << 60)
+#define GICR_VPENDBASER_PendingLast (1ULL << 61)
+#define GICR_VPENDBASER_IDAI (1ULL << 62)
+#define GICR_VPENDBASER_Valid (1ULL << 63)
+
+/*
* ITS registers, offsets from ITS_base
*/
#define GITS_CTLR 0x0000
@@ -234,15 +298,21 @@
#define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_ImDe (1U << 1)
+#define GITS_CTLR_ITS_NUMBER_SHIFT 4
+#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
+#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_TYPER_VMOVP (1ULL << 37)
#define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
@@ -342,6 +412,18 @@
#define GITS_CMD_SYNC 0x05
/*
+ * GICv4 ITS specific commands
+ */
+#define GITS_CMD_GICv4(x) ((x) | 0x20)
+#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
+#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
+#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
+#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
+#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
+/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+
+/*
* ITS error numbers
*/
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
@@ -487,6 +569,8 @@ struct rdists {
struct page *prop_page;
int id_bits;
u64 flags;
+ bool has_vlpis;
+ bool has_direct_lpi;
};
struct irq_domain;
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
new file mode 100644
index 000000000000..58a4d89aa82c
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
+#define __LINUX_IRQCHIP_ARM_GIC_V4_H
+
+struct its_vpe;
+
+/* Embedded in kvm.arch */
+struct its_vm {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
+ struct page *vprop_page;
+ struct its_vpe **vpes;
+ int nr_vpes;
+ irq_hw_number_t db_lpi_base;
+ unsigned long *db_bitmap;
+ int nr_db_lpis;
+};
+
+/* Embedded in kvm_vcpu.arch */
+struct its_vpe {
+ struct page *vpt_page;
+ struct its_vm *its_vm;
+ /* Doorbell interrupt */
+ int irq;
+ irq_hw_number_t vpe_db_lpi;
+ /* VPE proxy mapping */
+ int vpe_proxy_event;
+ /*
+ * This collection ID is used to indirect the target
+ * redistributor for this VPE. The ID itself isn't involved in
+ * programming of the ITS.
+ */
+ u16 col_idx;
+ /* Unique (system-wide) VPE identifier */
+ u16 vpe_id;
+ /* Implementation Defined Area Invalid */
+ bool idai;
+ /* Pending VLPIs on schedule out? */
+ bool pending_last;
+};
+
+/*
+ * struct its_vlpi_map: structure describing the mapping of a
+ * VLPI. Only to be interpreted in the context of a physical interrupt
+ * it complements. To be used as the vcpu_info passed to
+ * irq_set_vcpu_affinity().
+ *
+ * @vm: Pointer to the GICv4 notion of a VM
+ * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
+ * @vintid: Virtual LPI number
+ * @db_enabled: Is the VPE doorbell to be generated?
+ */
+struct its_vlpi_map {
+ struct its_vm *vm;
+ struct its_vpe *vpe;
+ u32 vintid;
+ bool db_enabled;
+};
+
+enum its_vcpu_info_cmd_type {
+ MAP_VLPI,
+ GET_VLPI,
+ PROP_UPDATE_VLPI,
+ PROP_UPDATE_AND_INV_VLPI,
+ SCHEDULE_VPE,
+ DESCHEDULE_VPE,
+ INVALL_VPE,
+};
+
+struct its_cmd_info {
+ enum its_vcpu_info_cmd_type cmd_type;
+ union {
+ struct its_vlpi_map *map;
+ u8 config;
+ };
+};
+
+int its_alloc_vcpu_irqs(struct its_vm *vm);
+void its_free_vcpu_irqs(struct its_vm *vm);
+int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_invall_vpe(struct its_vpe *vpe);
+int its_map_vlpi(int irq, struct its_vlpi_map *map);
+int its_get_vlpi(int irq, struct its_vlpi_map *map);
+int its_unmap_vlpi(int irq);
+int its_prop_update_vlpi(int irq, u8 config, bool inv);
+
+int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+
+#endif
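
The comment above struct its_vlpi_map says it is meant to be passed as the vcpu_info for irq_set_vcpu_affinity(). A minimal sketch of mapping a host LPI as a VLPI through its_map_vlpi(); the host irq and the vm/vpe objects are placeholders supplied by the caller.

	#include <linux/irqchip/arm-gic-v4.h>

	static int example_forward_vlpi(int host_irq, struct its_vm *vm,
					struct its_vpe *vpe, u32 vintid)
	{
		struct its_vlpi_map map = {
			.vm		= vm,
			.vpe		= vpe,
			.vintid		= vintid,	/* virtual LPI number */
			.db_enabled	= true,		/* generate doorbells */
		};

		/* Route the physical interrupt to the guest as a VLPI. */
		return its_map_vlpi(host_irq, &map);
	}
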
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index cac77a5c5555..81e4889ca6dd 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -265,9 +265,11 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
return node ? &node->fwnode : NULL;
}
+extern const struct fwnode_operations irqchip_fwnode_ops;
+
static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_IRQCHIP;
+ return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
extern void irq_domain_update_bus_token(struct irq_domain *domain,
@@ -460,6 +462,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
extern void irq_domain_free_irqs_top(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs);
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5dd1272d1ab2..5fdd93bb9300 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -23,10 +23,26 @@
# define trace_softirq_context(p) ((p)->softirq_context)
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
-# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+# define trace_hardirq_enter() \
+do { \
+ current->hardirq_context++; \
+ crossrelease_hist_start(XHLOCK_HARD); \
+} while (0)
+# define trace_hardirq_exit() \
+do { \
+ current->hardirq_context--; \
+ crossrelease_hist_end(XHLOCK_HARD); \
+} while (0)
+# define lockdep_softirq_enter() \
+do { \
+ current->softirq_context++; \
+ crossrelease_hist_start(XHLOCK_SOFT); \
+} while (0)
+# define lockdep_softirq_exit() \
+do { \
+ current->softirq_context--; \
+ crossrelease_hist_end(XHLOCK_SOFT); \
+} while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 348c6f47e4cc..8037850f3104 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -85,19 +85,18 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
k += 12;
}
/* Last block: affect all 32 bits of (c) */
- /* All the case statements fall through */
switch (length) {
- case 12: c += (u32)k[11]<<24;
- case 11: c += (u32)k[10]<<16;
- case 10: c += (u32)k[9]<<8;
- case 9: c += k[8];
- case 8: b += (u32)k[7]<<24;
- case 7: b += (u32)k[6]<<16;
- case 6: b += (u32)k[5]<<8;
- case 5: b += k[4];
- case 4: a += (u32)k[3]<<24;
- case 3: a += (u32)k[2]<<16;
- case 2: a += (u32)k[1]<<8;
+ case 12: c += (u32)k[11]<<24; /* fall through */
+ case 11: c += (u32)k[10]<<16; /* fall through */
+ case 10: c += (u32)k[9]<<8; /* fall through */
+ case 9: c += k[8]; /* fall through */
+ case 8: b += (u32)k[7]<<24; /* fall through */
+ case 7: b += (u32)k[6]<<16; /* fall through */
+ case 6: b += (u32)k[5]<<8; /* fall through */
+ case 5: b += k[4]; /* fall through */
+ case 4: a += (u32)k[3]<<24; /* fall through */
+ case 3: a += (u32)k[2]<<16; /* fall through */
+ case 2: a += (u32)k[1]<<8; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
@@ -131,10 +130,10 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
k += 3;
}
- /* Handle the last 3 u32's: all the case statements fall through */
+ /* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2];
- case 2: b += k[1];
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
case 1: a += k[0];
__jhash_final(a, b, c);
case 0: /* Nothing left to add */
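
A quick usage sketch of jhash(), whose tail handling is what the fall-through annotations above document; the key layout and seed are arbitrary.

	#include <linux/jhash.h>
	#include <linux/types.h>

	struct example_key {
		u32 saddr;
		u32 daddr;
		u16 sport;
		u16 dport;
	};

	static u32 example_hash(const struct example_key *key, u32 seed)
	{
		/*
		 * jhash() hashes an arbitrary number of bytes; the trailing
		 * 1..12 bytes are folded in by the switch annotated above.
		 */
		return jhash(key, sizeof(*key), seed);
	}
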
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b9d844..cd5861651b17 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -234,24 +236,29 @@ static inline int jump_label_apply_nops(struct module *mod)
static inline void static_key_enable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE();
- if (!count)
- static_key_slow_inc(key);
+ if (atomic_read(&key->enabled) != 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+ return;
+ }
+ atomic_set(&key->enabled, 1);
}
static inline void static_key_disable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE();
- if (count)
- static_key_slow_dec(key);
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+ return;
+ }
+ atomic_set(&key->enabled, 0);
}
+#define static_key_enable_cpuslocked(k) static_key_enable((k))
+#define static_key_disable_cpuslocked(k) static_key_disable((k))
+
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
@@ -413,8 +420,10 @@ extern bool ____wrong_branch_error(void);
* Normal usage; boolean enable/disable.
*/
-#define static_branch_enable(x) static_key_enable(&(x)->key)
-#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
#endif /* __ASSEMBLY__ */
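
A sketch of the boolean enable/disable usage and the new _cpuslocked variants added above. The key name and update paths are illustrative.

	#include <linux/jump_label.h>
	#include <linux/cpu.h>

	static DEFINE_STATIC_KEY_FALSE(example_feature);

	void example_set_feature(bool on)
	{
		/* Plain boolean enable/disable; repeated calls are no-ops. */
		if (on)
			static_branch_enable(&example_feature);
		else
			static_branch_disable(&example_feature);
	}

	void example_set_feature_cpuslocked(bool on)
	{
		/*
		 * Take the hotplug lock ourselves, then use the _cpuslocked
		 * variants inside the locked region.
		 */
		cpus_read_lock();
		if (on)
			static_branch_enable_cpuslocked(&example_feature);
		else
			static_branch_disable_cpuslocked(&example_feature);
		cpus_read_unlock();
	}

	bool example_fast_path(void)
	{
		return static_branch_unlikely(&example_feature);
	}
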
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index b7f8aced7870..41960fecf783 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -2,11 +2,13 @@
#define _LINUX_KASAN_CHECKS_H
#ifdef CONFIG_KASAN
-void kasan_check_read(const void *p, unsigned int size);
-void kasan_check_write(const void *p, unsigned int size);
+void kasan_check_read(const volatile void *p, unsigned int size);
+void kasan_check_write(const volatile void *p, unsigned int size);
#else
-static inline void kasan_check_read(const void *p, unsigned int size) { }
-static inline void kasan_check_write(const void *p, unsigned int size) { }
+static inline void kasan_check_read(const volatile void *p, unsigned int size)
+{ }
+static inline void kasan_check_write(const volatile void *p, unsigned int size)
+{ }
#endif
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index bd6d96cf80b1..6607225d0ea4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -277,6 +277,13 @@ extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
+#ifdef CONFIG_ARCH_HAS_REFCOUNT
+void refcount_error_report(struct pt_regs *regs, const char *err);
+#else
+static inline void refcount_error_report(struct pt_regs *regs, const char *err)
+{ }
+#endif
+
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index dd056fab9e35..2b7590f5483a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -327,6 +327,14 @@ static inline void *boot_phys_to_virt(unsigned long entry)
return phys_to_virt(boot_phys_to_phys(entry));
}
+#ifndef arch_kexec_post_alloc_pages
+static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
+#endif
+
+#ifndef arch_kexec_pre_free_pages
+static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
+#endif
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 8496cf64575c..9520fc3c3b9a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,7 @@ struct key_preparsed_payload {
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
time_t expiry; /* Expiry time of key */
-};
+} __randomize_layout;
typedef int (*request_key_actor_t)(struct key_construction *key,
const char *op, void *aux);
@@ -158,7 +158,7 @@ struct key_type {
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
-};
+} __randomize_layout;
extern struct key_type key_type_keyring;
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index c4e441e00db5..655082c88fd9 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -64,7 +64,7 @@ struct subprocess_info {
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
-};
+} __randomize_layout;
extern int
call_usermodehelper(const char *path, char **argv, char **envp, int wait);
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index eeab34b0f589..e0a6205caa71 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -57,6 +57,8 @@ enum kobject_action {
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
+ KOBJ_BIND,
+ KOBJ_UNBIND,
KOBJ_MAX
};
@@ -172,7 +174,7 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
extern void kset_init(struct kset *kset);
extern int __must_check kset_register(struct kset *kset);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 4fec8b775895..82e197eeac91 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -15,7 +15,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @...: arguments for @namefmt.
+ * @arg...: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 648b34cabb38..21a6fd6c44af 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -445,6 +445,7 @@ struct kvm {
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
+ pid_t userspace_pid;
};
#define kvm_err(fmt, ...) \
@@ -476,7 +477,8 @@ struct kvm {
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
@@ -569,7 +571,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
deleted file mode 100644
index 6db19f35f7c5..000000000000
--- a/include/linux/lguest.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Things the lguest guest needs to know. Note: like all lguest interfaces,
- * this is subject to wild and random change between versions.
- */
-#ifndef _LINUX_LGUEST_H
-#define _LINUX_LGUEST_H
-
-#ifndef __ASSEMBLY__
-#include <linux/time.h>
-#include <asm/irq.h>
-#include <asm/lguest_hcall.h>
-
-#define LG_CLOCK_MIN_DELTA 100UL
-#define LG_CLOCK_MAX_DELTA ULONG_MAX
-
-/*G:031
- * The second method of communicating with the Host is to via "struct
- * lguest_data". Once the Guest's initialization hypercall tells the Host where
- * this is, the Guest and Host both publish information in it.
-:*/
-struct lguest_data {
- /*
- * 512 == enabled (same as eflags in normal hardware). The Guest
- * changes interrupts so often that a hypercall is too slow.
- */
- unsigned int irq_enabled;
- /* Fine-grained interrupt disabling by the Guest */
- DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
-
- /*
- * The Host writes the virtual address of the last page fault here,
- * which saves the Guest a hypercall. CR2 is the native register where
- * this address would normally be found.
- */
- unsigned long cr2;
-
- /* Wallclock time set by the Host. */
- struct timespec time;
-
- /*
- * Interrupt pending set by the Host. The Guest should do a hypercall
- * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
- */
- int irq_pending;
-
- /*
- * Async hypercall ring. Instead of directly making hypercalls, we can
- * place them in here for processing the next time the Host wants.
- * This batching can be quite efficient.
- */
-
- /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
- u8 hcall_status[LHCALL_RING_SIZE];
- /* The actual registers for the hypercalls. */
- struct hcall_args hcalls[LHCALL_RING_SIZE];
-
-/* Fields initialized by the Host at boot: */
- /* Memory not to try to access */
- unsigned long reserve_mem;
- /* KHz for the TSC clock. */
- u32 tsc_khz;
-
-/* Fields initialized by the Guest at boot: */
- /* Instruction to suppress interrupts even if enabled */
- unsigned long noirq_iret;
- /* Address above which page tables are all identical. */
- unsigned long kernel_address;
- /* The vector to try to use for system calls (0x40 or 0x80). */
- unsigned int syscall_vec;
-};
-extern struct lguest_data lguest_data;
-#endif /* __ASSEMBLY__ */
-#endif /* _LINUX_LGUEST_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
deleted file mode 100644
index acd5b12565cc..000000000000
--- a/include/linux/lguest_launcher.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _LINUX_LGUEST_LAUNCHER
-#define _LINUX_LGUEST_LAUNCHER
-/* Everything the "lguest" userspace program needs to know. */
-#include <linux/types.h>
-
-/*D:010
- * Drivers
- *
- * The Guest needs devices to do anything useful. Since we don't let it touch
- * real devices (think of the damage it could do!) we provide virtual devices.
- * We emulate a PCI bus with virtio devices on it; we used to have our own
- * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
- *
- * Virtio devices are also used by kvm, so we can simply reuse their optimized
- * device drivers. And one day when everyone uses virtio, my plan will be
- * complete. Bwahahahah!
- */
-
-/* Write command first word is a request. */
-enum lguest_req
-{
- LHREQ_INITIALIZE, /* + base, pfnlimit, start */
- LHREQ_GETDMA, /* No longer used */
- LHREQ_IRQ, /* + irq */
- LHREQ_BREAK, /* No longer used */
- LHREQ_EVENTFD, /* No longer used. */
- LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
- LHREQ_SETREG, /* + offset within struct pt_regs, value. */
- LHREQ_TRAP, /* + trap number to deliver to guest. */
-};
-
-/*
- * This is what read() of the lguest fd populates. trap ==
- * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
- * argument), 14 for a page fault in the MMIO region (addr is
- * the trap address, insn is the instruction), or 13 for a GPF
- * (insn is the instruction).
- */
-struct lguest_pending {
- __u8 trap;
- __u8 insn[7];
- __u32 addr;
-};
-#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 55de3da58b1c..931c32f1f18d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -435,7 +435,7 @@ enum {
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
+ ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index d11738110a7a..1957635e6d5f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -93,6 +93,23 @@ static inline void init_llist_head(struct llist_head *list)
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers can decide that taking a member
+ * address is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -145,7 +162,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- &(pos)->member != NULL; \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -167,7 +184,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- &pos->member != NULL && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
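
An illustration of the lock-less list traversal that the member_address_is_nonnull() change hardens: the loop termination test can no longer be optimized away on the assumption that a member address is never NULL. The item structure and names are placeholders.

	#include <linux/llist.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	struct example_item {
		int value;
		struct llist_node node;
	};

	static LLIST_HEAD(example_list);

	static void example_producer(int value)
	{
		struct example_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

		if (!item)
			return;
		item->value = value;
		llist_add(&item->node, &example_list);
	}

	static void example_consumer(void)
	{
		struct llist_node *first = llist_del_all(&example_list);
		struct example_item *item, *tmp;

		/*
		 * The loop condition now uses member_address_is_nonnull(), so
		 * the end-of-list check survives compiler optimization.
		 */
		llist_for_each_entry_safe(item, tmp, first, node) {
			pr_info("got %d\n", item->value);
			kfree(item);
		}
	}
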
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fffe49f188e6..bfa8e0b0d6f1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -18,6 +18,8 @@ extern int lock_stat;
#define MAX_LOCKDEP_SUBCLASSES 8UL
+#include <linux/types.h>
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -29,7 +31,7 @@ extern int lock_stat;
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
*/
-#define XXX_LOCK_USAGE_STATES (1+3*4)
+#define XXX_LOCK_USAGE_STATES (1+2*4)
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
@@ -155,6 +157,12 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+ /*
+ * Whether it's a crosslock.
+ */
+ int cross;
+#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -258,8 +266,95 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+ /*
+ * Generation id.
+ *
+ * A value of cross_gen_id will be stored when holding this,
+ * which is globally increased whenever each crosslock is held.
+ */
+ unsigned int gen_id;
+#endif
+};
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#define MAX_XHLOCK_TRACE_ENTRIES 5
+
+/*
+ * This is for keeping locks waiting for commit so that true dependencies
+ * can be added at commit step.
+ */
+struct hist_lock {
+ /*
+ * Id for each entry in the ring buffer. This is used to
+ * decide whether the ring buffer was overwritten or not.
+ *
+ * For example,
+ *
+ * |<----------- hist_lock ring buffer size ------->|
+ * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+ * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
+ *
+ * where 'p' represents an acquisition in process
+ * context, 'i' represents an acquisition in irq
+ * context.
+ *
+ * In this example, the ring buffer was overwritten by
+ * acquisitions in irq context, which should be detected at
+ * rollback or commit.
+ */
+ unsigned int hist_id;
+
+ /*
+ * Separate stack_trace data. This will be used at the commit step.
+ */
+ struct stack_trace trace;
+ unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
+
+ /*
+ * Separate hlock instance. This will be used at the commit step.
+ *
+ * TODO: Use a smaller data structure containing only necessary
+ * data. However, we should make lockdep code able to handle the
+ * smaller one first.
+ */
+ struct held_lock hlock;
+};
+
+/*
+ * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
+ * be called instead of lockdep_init_map().
+ */
+struct cross_lock {
+ /*
+ * When multiple acquisitions of crosslocks overlap, we have to
+ * perform the commit for them based on the cross_gen_id of the
+ * first acquisition, which allows us to add more true
+ * dependencies.
+ *
+ * Moreover, when no acquisition of a crosslock is in progress,
+ * we should not perform commit because the lock might not exist
+ * any more, which might cause incorrect memory access. So we
+ * have to track the number of acquisitions of a crosslock.
+ */
+ int nr_acquire;
+
+ /*
+ * Separate hlock instance. This will be used at the commit step.
+ *
+ * TODO: Use a smaller data structure containing only necessary
+ * data. However, we should make lockdep code able to handle the
+ * smaller one first.
+ */
+ struct held_lock hlock;
};
+struct lockdep_map_cross {
+ struct lockdep_map map;
+ struct cross_lock xlock;
+};
+#endif
+
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -282,13 +377,6 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass);
/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
-
-/*
* Reinitialize a lock key - for cases where there is special locking or
* special initialization of locks so that the validator gets the scope
* of dependencies wrong: they are either too broad (they need a class-split)
@@ -363,10 +451,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
-extern void lockdep_clear_current_reclaim_state(void);
-extern void lockdep_trace_alloc(gfp_t mask);
-
struct pin_cookie { unsigned int val; };
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
@@ -375,7 +459,7 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -416,9 +500,6 @@ static inline void lockdep_on(void)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_set_current_reclaim_state(g) do { } while (0)
-# define lockdep_clear_current_reclaim_state() do { } while (0)
-# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
@@ -467,6 +548,58 @@ struct pin_cookie { };
#endif /* !LOCKDEP */
+enum xhlock_context_t {
+ XHLOCK_HARD,
+ XHLOCK_SOFT,
+ XHLOCK_CTX_NR,
+};
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
+ const char *name,
+ struct lock_class_key *key,
+ int subclass);
+extern void lock_commit_crosslock(struct lockdep_map *lock);
+
+/*
+ * What we essentially have to initialize is 'nr_acquire'. Other members
+ * will be initialized in add_xlock().
+ */
+#define STATIC_CROSS_LOCK_INIT() \
+ { .nr_acquire = 0,}
+
+#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
+ { .map.name = (_name), .map.key = (void *)(_key), \
+ .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
+
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), .cross = 0, }
+
+extern void crossrelease_hist_start(enum xhlock_context_t c);
+extern void crossrelease_hist_end(enum xhlock_context_t c);
+extern void lockdep_invariant_state(bool force);
+extern void lockdep_init_task(struct task_struct *task);
+extern void lockdep_free_task(struct task_struct *task);
+#else /* !CROSSRELEASE */
+#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), }
+
+static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
+static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
+static inline void lockdep_invariant_state(bool force) {}
+static inline void lockdep_init_task(struct task_struct *task) {}
+static inline void lockdep_free_task(struct task_struct *task) {}
+#endif /* CROSSRELEASE */
+
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
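A hedged sketch of how the static initializers above might be used. The names
my_map and my_xmap are invented, and passing each object's own address as the
key simply mirrors what some in-kernel users of STATIC_LOCKDEP_MAP_INIT() do;
it is not the only valid choice.

	#include <linux/lockdep.h>

	/* Ordinary map: .cross is 0 when CROSSRELEASE is enabled. */
	static struct lockdep_map my_map =
		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map);

	#ifdef CONFIG_LOCKDEP_CROSSRELEASE
	/* Crosslock variant: carries its own cross_lock with nr_acquire = 0. */
	static struct lockdep_map_cross my_xmap =
		STATIC_CROSS_LOCKDEP_MAP_INIT("my_xmap", &my_xmap);
	#endif
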
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7a86925ba8f3..3a90febadbe2 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1912,7 +1912,7 @@ struct security_hook_heads {
struct list_head audit_rule_match;
struct list_head audit_rule_free;
#endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
/*
* Security module hook list structure.
@@ -1923,7 +1923,7 @@ struct security_hook_list {
struct list_head *head;
union security_list_options hook;
char *lsm;
-};
+} __randomize_layout;
/*
* Initializing a security_hook_list structure takes
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 4097ac9ea13a..b1a0ad9d23b3 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -136,5 +136,7 @@ extern struct resource *mcb_request_mem(struct mcb_device *dev,
const char *name);
extern void mcb_release_mem(struct resource *mem);
extern int mcb_get_irq(struct mcb_device *dev);
+extern struct resource *mcb_get_resource(struct mcb_device *dev,
+ unsigned int type);
#endif /* _LINUX_MCB_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index 61f5b21b31c7..a5d58f221939 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -12,7 +12,16 @@
#include <linux/device.h>
#include <linux/phy.h>
+/* mdio_mux_init() - Initialize an MDIO mux
+ * @dev The device owning the MDIO mux
+ * @mux_node The device node of the MDIO mux
+ * @switch_fn The function called to switch the target MDIO child bus
+ * @mux_handle A pointer to a (void *) used internally by mdio-mux
+ * @data Private data used by switch_fn()
+ * @mux_bus An optional parent bus (otherwise the parent_bus property is used)
+ */
int mdio_mux_init(struct device *dev,
+ struct device_node *mux_node,
int (*switch_fn) (int cur, int desired, void *data),
void **mux_handle,
void *data,
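A hedged sketch of a caller of the updated mdio_mux_init() signature, with the
new mux_node argument taken from the probing device's of_node. The names
my_mux, my_switch_fn and my_probe are invented; only mdio_mux_init() and the
devm/platform helpers are real kernel APIs.

	#include <linux/mdio-mux.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	struct my_mux {
		void *mux_handle;
	};

	static int my_switch_fn(int current_child, int desired_child, void *data)
	{
		struct my_mux *mux = data;

		/* program mux hardware so that 'desired_child' is routed */
		return 0;
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct my_mux *mux;

		mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return -ENOMEM;

		/* NULL mux_bus: fall back to the parent_bus property */
		return mdio_mux_init(&pdev->dev, pdev->dev.of_node, my_switch_fn,
				     &mux->mux_handle, mux, NULL);
	}
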
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
new file mode 100644
index 000000000000..1255f09f5e42
--- /dev/null
+++ b/include/linux/mem_encrypt.h
@@ -0,0 +1,48 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MEM_ENCRYPT_H__
+#define __MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
+
+#include <asm/mem_encrypt.h>
+
+#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+#define sme_me_mask 0UL
+
+#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+static inline bool sme_active(void)
+{
+ return !!sme_me_mask;
+}
+
+static inline unsigned long sme_get_me_mask(void)
+{
+ return sme_me_mask;
+}
+
+/*
+ * The __sme_set() and __sme_clr() macros are useful for adding or removing
+ * the encryption mask from a value (e.g. when dealing with pagetable
+ * entries).
+ */
+#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
+#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __MEM_ENCRYPT_H__ */
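A hedged sketch of the helpers in the new header: OR the SME mask into a
physical address when building a page-table-entry value and strip it again on
the way back. The function names are invented and the protection handling is
deliberately simplified.

	#include <linux/mem_encrypt.h>

	static unsigned long make_encrypted_pte_val(unsigned long phys,
						    unsigned long prot)
	{
		/* phys gains the encryption bit only when SME is active */
		return __sme_set(phys) | prot;
	}

	static unsigned long pte_val_to_phys(unsigned long val,
					     unsigned long prot_mask)
	{
		/* drop the encryption bit before recovering the address */
		return __sme_clr(val) & ~prot_mask;
	}
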
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 77d427974f57..bae11c7e7bf3 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -61,6 +61,7 @@ extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
+void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
@@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3914e3dd6168..69966c461d1c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -484,11 +484,13 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
long val = 0;
int cpu;
@@ -502,15 +504,17 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return val;
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (!mem_cgroup_disabled())
__this_cpu_add(memcg->stat->count[idx], val);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (!mem_cgroup_disabled())
this_cpu_add(memcg->stat->count[idx], val);
@@ -534,14 +538,14 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
* Kernel pages are an exception to this, since they'll never move.
*/
static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (page->mem_cgroup)
__mod_memcg_state(page->mem_cgroup, idx, val);
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (page->mem_cgroup)
mod_memcg_state(page->mem_cgroup, idx, val);
@@ -631,8 +635,9 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
this_cpu_add(memcg->stat->events[idx], count);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
if (page->mem_cgroup)
count_memcg_events(page->mem_cgroup, idx, 1);
@@ -809,7 +814,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}
@@ -840,31 +850,31 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
return 0;
}
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
@@ -918,7 +928,7 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
}
static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
}
@@ -928,26 +938,30 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
}
#endif /* CONFIG_MEMCG */
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_state(memcg, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_state(memcg, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_page_state(page, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_page_state(page, idx, -1);
}
@@ -976,26 +990,30 @@ static inline void __dec_lruvec_page_state(struct page *page,
__mod_lruvec_page_state(page, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_state(memcg, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_state(memcg, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_page_state(page, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_page_state(page, idx, -1);
}
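A hedged sketch of the new lock/unlock pairing introduced above:
update_page_stat() is an invented caller; the point is only that the memcg
returned by lock_page_memcg() can later be released with
__unlock_page_memcg(), even if page->mem_cgroup changes in between.

	#include <linux/memcontrol.h>

	static void update_page_stat(struct page *page, int idx, int val)
	{
		struct mem_cgroup *memcg;

		memcg = lock_page_memcg(page);
		__mod_memcg_page_state(page, idx, val);
		__unlock_page_memcg(memcg);
	}
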
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c8a5056a5ae0..5e6e4cc36ff4 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -319,6 +319,6 @@ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
int online_type);
-extern struct zone *default_zone_for_pfn(int nid, unsigned long pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 190c8f4afa02..2b16e95b9bb8 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -285,6 +285,11 @@ enum host_event_code {
EC_HOST_EVENT_HANG_DETECT = 20,
/* Hang detect logic detected a hang and warm rebooted the AP */
EC_HOST_EVENT_HANG_REBOOT = 21,
+ /* PD MCU triggering host event */
+ EC_HOST_EVENT_PD_MCU = 22,
+
+ /* EC desires to change state of host-controlled USB mux */
+ EC_HOST_EVENT_USB_MUX = 28,
/*
* The high bit of the event mask is not used as a host event code. If
@@ -2905,6 +2910,76 @@ struct ec_params_usb_pd_control {
uint8_t mux;
} __packed;
+#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
+#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
+#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+
+struct ec_response_usb_pd_control_v1 {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ char state[32];
+} __packed;
+
+#define EC_CMD_USB_PD_PORTS 0x102
+
+struct ec_response_usb_pd_ports {
+ uint8_t num_ports;
+} __packed;
+
+#define EC_CMD_USB_PD_POWER_INFO 0x103
+
+#define PD_POWER_CHARGING_PORT 0xff
+struct ec_params_usb_pd_power_info {
+ uint8_t port;
+} __packed;
+
+enum usb_chg_type {
+ USB_CHG_TYPE_NONE,
+ USB_CHG_TYPE_PD,
+ USB_CHG_TYPE_C,
+ USB_CHG_TYPE_PROPRIETARY,
+ USB_CHG_TYPE_BC12_DCP,
+ USB_CHG_TYPE_BC12_CDP,
+ USB_CHG_TYPE_BC12_SDP,
+ USB_CHG_TYPE_OTHER,
+ USB_CHG_TYPE_VBUS,
+ USB_CHG_TYPE_UNKNOWN,
+};
+
+struct usb_chg_measures {
+ uint16_t voltage_max;
+ uint16_t voltage_now;
+ uint16_t current_max;
+ uint16_t current_lim;
+} __packed;
+
+struct ec_response_usb_pd_power_info {
+ uint8_t role;
+ uint8_t type;
+ uint8_t dualrole;
+ uint8_t reserved1;
+ struct usb_chg_measures meas;
+ uint32_t max_power;
+} __packed;
+
+/* Get info about USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_INFO 0x11a
+
+struct ec_params_usb_pd_mux_info {
+ uint8_t port; /* USB-C port number */
+} __packed;
+
+/* Flags representing mux state */
+#define USB_PD_MUX_USB_ENABLED (1 << 0)
+#define USB_PD_MUX_DP_ENABLED (1 << 1)
+#define USB_PD_MUX_POLARITY_INVERTED (1 << 2)
+#define USB_PD_MUX_HPD_IRQ (1 << 3)
+
+struct ec_response_usb_pd_mux_info {
+ uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
+} __packed;
+
/*****************************************************************************/
/*
* Passthru commands
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index ce9230af09c2..ae5b663836d0 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -45,6 +45,12 @@
#define DA9052_ADC_TJUNC 8
#define DA9052_ADC_VBBAT 9
+/* TSI channel has its own 4 channel mux */
+#define DA9052_ADC_TSI_XP 70
+#define DA9052_ADC_TSI_XN 71
+#define DA9052_ADC_TSI_YP 72
+#define DA9052_ADC_TSI_YN 73
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index 5010f978725c..76780ea8849c 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -690,7 +690,10 @@
/* TSI CONTROL REGISTER B BITS */
#define DA9052_TSICONTB_ADCREF 0X80
#define DA9052_TSICONTB_TSIMAN 0X40
-#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSIMUX_XP 0X00
+#define DA9052_TSICONTB_TSIMUX_YP 0X10
+#define DA9052_TSICONTB_TSIMUX_XN 0X20
+#define DA9052_TSICONTB_TSIMUX_YN 0X30
#define DA9052_TSICONTB_TSISEL3 0X08
#define DA9052_TSICONTB_TSISEL2 0X04
#define DA9052_TSICONTB_TSISEL1 0X02
@@ -705,8 +708,14 @@
/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
#define DA9052_TSILSB_PENDOWN 0X40
#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIZL_SHIFT 4
+#define DA9052_TSILSB_TSIZL_BITS 2
#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIYL_SHIFT 2
+#define DA9052_TSILSB_TSIYL_BITS 2
#define DA9052_TSILSB_TSIXL 0X03
+#define DA9052_TSILSB_TSIXL_SHIFT 0
+#define DA9052_TSILSB_TSIXL_BITS 2
/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
#define DA9052_TSIZMSB_TSIZM 0XFF
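A hedged sketch of how a touchscreen driver might combine an MSB sample with
the packed LSB bits using the new shift/width defines; da9052_tsi_sample_x()
is an invented helper and GENMASK() from <linux/bitops.h> is assumed.

	#include <linux/bitops.h>
	#include <linux/mfd/da9052/reg.h>

	static u16 da9052_tsi_sample_x(u8 x_msb, u8 lsb)
	{
		u8 x_lsb = (lsb >> DA9052_TSILSB_TSIXL_SHIFT) &
			   GENMASK(DA9052_TSILSB_TSIXL_BITS - 1, 0);

		/* two LSB bits below the 8-bit MSB sample */
		return (x_msb << DA9052_TSILSB_TSIXL_BITS) | x_lsb;
	}
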
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
index 38a372a0e285..2227c6a75d84 100644
--- a/include/linux/mfd/ds1wm.h
+++ b/include/linux/mfd/ds1wm.h
@@ -1,13 +1,28 @@
-/* MFD cell driver data for the DS1WM driver */
+/* MFD cell driver data for the DS1WM driver
+ *
+ * to be provided by the MFD device that is
+ * using this driver for one of its sub-devices
+ */
struct ds1wm_driver_data {
int active_high;
int clock_rate;
- /* in milliseconds, the amount of time to */
- /* sleep following a reset pulse. Zero */
- /* should work if your bus devices recover*/
- /* time respects the 1-wire spec since the*/
- /* ds1wm implements the precise timings of*/
- /* a reset pulse/presence detect sequence.*/
+ /* in milliseconds, the amount of time to
+ * sleep following a reset pulse. Zero
+ * should work if your bus devices' recovery
+ * time respects the 1-wire spec, since the
+ * ds1wm implements the precise timings of
+ * a reset pulse/presence detect sequence.
+ */
unsigned int reset_recover_delay;
+
+ /* Set to true for big-endian hardware
+ * (only relevant with bus_shift > 0)
+ */
+ bool is_hw_big_endian;
+
+ /* left shift of the register number to get the register address offset.
+ * Only 0, 1 and 2 are allowed, for 8-, 16- or 32-bit bus widths respectively
+ */
+ unsigned int bus_shift;
};
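A hedged sketch of platform data an MFD cell might pass to the DS1WM
sub-device using the new fields; my_ds1wm_pdata and the values are invented.

	#include <linux/mfd/ds1wm.h>

	static struct ds1wm_driver_data my_ds1wm_pdata = {
		.active_high		= 0,
		.clock_rate		= 32000000,
		.reset_recover_delay	= 1,		/* ms */
		.is_hw_big_endian	= false,
		.bus_shift		= 2,		/* registers on a 32-bit bus */
	};
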
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index e5a6cdeb77db..d61bc58aba8a 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -226,11 +226,17 @@ enum {
RN5T618_DCDC2,
RN5T618_DCDC3,
RN5T618_DCDC4,
+ RN5T618_DCDC5,
RN5T618_LDO1,
RN5T618_LDO2,
RN5T618_LDO3,
RN5T618_LDO4,
RN5T618_LDO5,
+ RN5T618_LDO6,
+ RN5T618_LDO7,
+ RN5T618_LDO8,
+ RN5T618_LDO9,
+ RN5T618_LDO10,
RN5T618_LDORTC1,
RN5T618_LDORTC2,
RN5T618_REG_NUM,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d5bed0875d30..b0a57e043fa3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -224,6 +224,7 @@ enum {
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
+ MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
};
enum {
@@ -428,6 +429,12 @@ enum mlx4_steer_type {
MLX4_NUM_STEERS
};
+enum mlx4_resource_usage {
+ MLX4_RES_USAGE_NONE,
+ MLX4_RES_USAGE_DRIVER,
+ MLX4_RES_USAGE_USER_VERBS,
+};
+
enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
@@ -518,6 +525,14 @@ struct mlx4_phys_caps {
u32 base_tunnel_sqpn;
};
+struct mlx4_spec_qps {
+ u32 qp0_qkey;
+ u32 qp0_proxy;
+ u32 qp0_tunnel;
+ u32 qp1_proxy;
+ u32 qp1_tunnel;
+};
+
struct mlx4_caps {
u64 fw_ver;
u32 function;
@@ -547,11 +562,7 @@ struct mlx4_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int max_tc_eth;
- u32 *qp0_qkey;
- u32 *qp0_proxy;
- u32 *qp1_proxy;
- u32 *qp0_tunnel;
- u32 *qp1_tunnel;
+ struct mlx4_spec_qps *spec_qps;
int num_srqs;
int max_srq_wqes;
int max_srq_sge;
@@ -620,6 +631,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
};
@@ -748,6 +760,7 @@ struct mlx4_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ u8 usage;
};
struct mlx4_qp {
@@ -757,6 +770,7 @@ struct mlx4_qp {
atomic_t refcount;
struct completion free;
+ u8 usage;
};
struct mlx4_srq {
@@ -1068,7 +1082,7 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
@@ -1105,10 +1119,9 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
- gfp_t gfp);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1121,11 +1134,10 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base, u8 flags);
+ int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
- gfp_t gfp);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
@@ -1374,6 +1386,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
@@ -1419,7 +1432,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f31a0b5377e1..eaf4ad209c8f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -48,7 +48,7 @@
/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
@@ -290,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -305,6 +306,10 @@ enum mlx5_event {
};
enum {
+ MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+};
+
+enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
@@ -709,7 +714,7 @@ static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
-static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}
@@ -968,7 +973,7 @@ enum mlx5_cap_type {
MLX5_CAP_ATOMIC,
MLX5_CAP_ROCE,
MLX5_CAP_IPOIB_OFFLOADS,
- MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
@@ -1011,6 +1016,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+
#define MLX5_CAP_ROCE(mdev, cap) \
MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index df6ce59a1f95..02ff700e4f30 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -162,6 +162,13 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
+enum port_state_policy {
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
+};
+
struct mlx5_field_desc {
struct dentry *dent;
int i;
@@ -185,6 +192,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_PPS,
+ MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};
enum mlx5_port_status {
@@ -291,7 +299,7 @@ struct mlx5_cmd {
struct semaphore pages_sem;
int mode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
@@ -410,6 +418,7 @@ enum mlx5_res_type {
MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
+ MLX5_RES_XRQ = 5,
};
struct mlx5_core_rsc_common {
@@ -525,6 +534,9 @@ struct mlx5_mkey_table {
struct mlx5_vf_context {
int enabled;
+ u64 port_guid;
+ u64 node_guid;
+ enum port_state_policy policy;
};
struct mlx5_core_sriov {
@@ -534,7 +546,6 @@ struct mlx5_core_sriov {
};
struct mlx5_irq_info {
- cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
@@ -550,6 +561,7 @@ struct mlx5_fc_stats {
unsigned long sampling_interval; /* jiffies */
};
+struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;
@@ -597,7 +609,6 @@ struct mlx5_port_module_event_stats {
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
- struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
/* pages stuff */
@@ -646,7 +657,11 @@ struct mlx5_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct list_head waiting_events_list;
+ bool is_accum_events;
+
struct mlx5_flow_steering *steering;
+ struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
@@ -673,9 +688,7 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN = BIT(0),
- MLX5_INTERFACE_STATE_UP = BIT(1),
- MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+ MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {
@@ -842,13 +855,6 @@ struct mlx5_pas {
u8 log_sz;
};
-enum port_state_policy {
- MLX5_POLICY_DOWN = 0,
- MLX5_POLICY_UP = 1,
- MLX5_POLICY_FOLLOW = 2,
- MLX5_POLICY_INVALID = 0xffffffff
-};
-
enum phy_port_state {
MLX5_AAA_111
};
@@ -885,8 +891,6 @@ static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
return buf->direct.buf + offset;
}
-extern struct workqueue_struct *mlx5_core_wq;
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
@@ -1091,7 +1095,7 @@ enum {
};
enum {
- MAX_UMR_CACHE_ENTRY = 20,
+ MR_CACHE_LAST_STD_ENTRY = 20,
MLX5_IMR_MTT_CACHE_ENTRY,
MLX5_IMR_KSM_CACHE_ENTRY,
MAX_MR_CACHE_ENTRIES
@@ -1185,4 +1189,10 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline const struct cpumask *
+mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+{
+ return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+}
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 87869c04849a..a528b35a022e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -200,6 +200,7 @@ enum {
MLX5_CMD_OP_QUERY_SQ = 0x907,
MLX5_CMD_OP_CREATE_RQ = 0x908,
MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
MLX5_CMD_OP_DESTROY_RQ = 0x90a,
MLX5_CMD_OP_QUERY_RQ = 0x90b,
MLX5_CMD_OP_CREATE_RMP = 0x90c,
@@ -294,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
+ u8 reserved_at_40[0x1a];
+ u8 bth_dst_qp[0x1];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_5b[0x25];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -431,7 +434,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xe0];
+ u8 reserved_at_120[0x28];
+ u8 bth_dst_qp[0x18];
+ u8 reserved_at_160[0xa0];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -599,10 +604,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
u8 scatter_fcs[0x1];
- u8 reserved_at_1a[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_at_1c[0x2];
- u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 swp[0x1];
@@ -840,7 +845,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 retransmission_q_counters[0x1];
u8 reserved_at_183[0x1];
u8 modify_rq_counter_set_id[0x1];
- u8 reserved_at_185[0x1];
+ u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -857,7 +862,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b1[0x1];
+ u8 enhanced_error_q_counters[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
@@ -873,7 +878,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x3];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
@@ -963,7 +969,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2a0[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2c0[0x10];
+ u8 max_flow_counter_31_16[0x10];
u8 max_wqe_sz_sq_dc[0x10];
u8 reserved_at_2e0[0x7];
@@ -981,7 +987,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_340[0x8];
u8 log_max_flow_counter_bulk[0x8];
- u8 max_flow_counter[0x10];
+ u8 max_flow_counter_15_0[0x10];
u8 reserved_at_360[0x3];
@@ -1016,7 +1022,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e1[0xa];
+ u8 disable_local_lb[0x1];
+ u8 reserved_at_3e2[0x9];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1071,9 +1078,7 @@ struct mlx5_ifc_dest_format_struct_bits {
};
struct mlx5_ifc_flow_counter_list_bits {
- u8 clear[0x1];
- u8 num_of_counters[0xf];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_20[0x20];
};
@@ -1187,7 +1192,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
- u8 reserved_at_d8[0x5];
+ u8 reserved_at_d8[0x4];
+ u8 cnp_prio_mode[0x1];
u8 cnp_802p_prio[0x3];
u8 reserved_at_e0[0x720];
@@ -1540,7 +1546,17 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 port_transmit_wait_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 reserved_at_40[0x100];
+
+ u8 rx_buffer_almost_full_high[0x20];
+
+ u8 rx_buffer_almost_full_low[0x20];
+
+ u8 rx_buffer_full_high[0x20];
+
+ u8 rx_buffer_full_low[0x20];
+
+ u8 reserved_at_1c0[0x600];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1856,7 +1872,19 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 crc_error_tlp[0x20];
- u8 reserved_at_140[0x680];
+ u8 tx_overflow_buffer_pkt_high[0x20];
+
+ u8 tx_overflow_buffer_pkt_low[0x20];
+
+ u8 outbound_stalled_reads[0x20];
+
+ u8 outbound_stalled_writes[0x20];
+
+ u8 outbound_stalled_reads_events[0x20];
+
+ u8 outbound_stalled_writes_events[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
@@ -2015,6 +2043,10 @@ enum {
};
enum {
+ MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1,
+};
+
+enum {
MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
};
@@ -2057,7 +2089,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x7];
+ u8 reserved_at_15[0x3];
+ u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2437,7 +2470,7 @@ struct mlx5_ifc_sqc_bits {
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_4[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
u8 min_wqe_inline_mode[0x3];
u8 state[0x4];
u8 reg_umr[0x1];
@@ -2515,7 +2548,7 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x1];
+ u8 delay_drop_en[0x1];
u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
@@ -2562,7 +2595,9 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
- u8 reserved_at_8[0x17];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@@ -3000,7 +3035,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x880];
+ u8 reserved_at_180[0x280];
struct mlx5_ifc_wq_bits wq;
};
@@ -3947,7 +3982,47 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0x4e0];
+ u8 reserved_at_320[0xa0];
+
+ u8 resp_local_length_error[0x20];
+
+ u8 req_local_length_error[0x20];
+
+ u8 resp_local_qp_error[0x20];
+
+ u8 local_operation_error[0x20];
+
+ u8 resp_local_protection[0x20];
+
+ u8 req_local_protection[0x20];
+
+ u8 resp_cqe_error[0x20];
+
+ u8 req_cqe_error[0x20];
+
+ u8 req_mw_binding[0x20];
+
+ u8 req_bad_response[0x20];
+
+ u8 req_remote_invalid_request[0x20];
+
+ u8 resp_remote_invalid_request[0x20];
+
+ u8 req_remote_access_errors[0x20];
+
+ u8 resp_remote_access_errors[0x20];
+
+ u8 req_remote_operation_errors[0x20];
+
+ u8 req_transport_retries_exceeded[0x20];
+
+ u8 cq_overflow[0x20];
+
+ u8 resp_cqe_flush_error[0x20];
+
+ u8 req_cqe_flush_error[0x20];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -4403,8 +4478,7 @@ struct mlx5_ifc_query_flow_counter_in_bits {
u8 reserved_at_c1[0xf];
u8 num_of_counters[0x10];
- u8 reserved_at_e0[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
};
struct mlx5_ifc_query_esw_vport_context_out_bits {
@@ -5229,7 +5303,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x16];
+ u8 reserved_at_0[0x14];
+ u8 disable_uc_local_lb[0x1];
+ u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 min_inline[0x1];
@@ -5847,6 +5923,28 @@ struct mlx5_ifc_destroy_rq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_set_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_set_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6272,8 +6370,7 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7098,8 +7195,7 @@ struct mlx5_ifc_alloc_flow_counter_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7718,8 +7814,9 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7c];
+ u8 reserved_at_0[0x7b];
+ u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
@@ -7749,8 +7846,11 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7f];
-
+ u8 reserved_at_0[0x7b];
+ u8 pcie_outbound_stalled[0x1];
+ u8 tx_overflow_buffer_pkt[0x1];
+ u8 mtpps_enh_out_per_adj[0x1];
+ u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
};
@@ -8159,7 +8259,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_78[0x4];
u8 cap_pin_4_mode[0x4];
- u8 reserved_at_80[0x80];
+ u8 field_select[0x20];
+ u8 reserved_at_a0[0x60];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -8174,8 +8275,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 out_pulse_duration[0x10];
u8 out_periodic_adjustment[0x10];
+ u8 enhanced_out_periodic_adjustment[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 reserved_at_1c0[0x20];
};
struct mlx5_ifc_mtppse_reg_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6f41270d80c0..66d19b611fe4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -561,6 +560,9 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+ u32 timeout_usec);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 1cde0fd53f90..24ff23e27c8a 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -38,6 +38,7 @@
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
};
struct mlx5_srq_attr {
@@ -56,6 +57,10 @@ struct mlx5_srq_attr {
u32 user_index;
u64 db_record;
__be64 *pas;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 656c70b65dd2..aaa0bb9e7655 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -114,5 +114,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
-
+int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
+int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5e8569..39db8e54c5d5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -189,7 +189,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_ARCH_2 0x02000000
+#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -208,10 +208,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#if defined(CONFIG_X86)
@@ -235,9 +237,11 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_ARCH_2
+# define VM_MPX VM_HIGH_ARCH_BIT_4
+#else
+# define VM_MPX VM_NONE
#endif
#ifndef VM_GROWSUP
@@ -1260,6 +1264,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ unsigned long *start, unsigned long *end,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
@@ -2293,6 +2298,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
+int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
+ pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2505,7 +2512,7 @@ enum mf_action_page_type {
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
- unsigned long addr,
+ unsigned long addr_hint,
unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma,
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e030a68ead7e..25438b2b6f22 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -126,4 +126,10 @@ static __always_inline enum lru_list page_lru(struct page *page)
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+#ifdef arch_unmap_kpfn
+extern void arch_unmap_kpfn(unsigned long pfn);
+#else
+static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
+#endif
+
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..f45ad815b7d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -335,6 +335,7 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+ atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
@@ -342,7 +343,7 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-};
+} __randomize_layout;
struct core_thread {
struct task_struct *task;
@@ -487,20 +488,22 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
* An operation with batched TLB flushing is going on. Anything that
* can move process memory needs to flush the TLB when moving a
* PROT_NONE or PROT_NUMA mapped page.
*/
- bool tlb_flush_pending;
+ atomic_t tlb_flush_pending;
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
-};
+} __randomize_layout;
extern struct mm_struct init_mm;
@@ -518,46 +521,95 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-/*
- * Memory barriers to keep this state in sync are graciously provided by
- * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
- */
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end);
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
- return mm->tlb_flush_pending;
+ atomic_set(&mm->tlb_flush_pending, 0);
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
- mm->tlb_flush_pending = true;
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
/*
- * Guarantee that the tlb_flush_pending store does not leak into the
- * critical section updating the page tables
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+ * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
*/
- smp_mb__before_spinlock();
}
-/* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
- mm->tlb_flush_pending = false;
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
}
-#else
+
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
- return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
}
-#endif
struct vm_fault;
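A hedged sketch of the bracketing pattern the comments above describe:
unmap_range() is an invented caller and the PTE manipulation under the PTL is
elided; only inc/dec_tlb_flush_pending() and flush_tlb_range() are real APIs.

	#include <linux/mm_types.h>
	#include <asm/tlbflush.h>

	static void unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = vma->vm_mm;

		inc_tlb_flush_pending(mm);
		/* ... take the PTL, clear the PTEs, drop the PTL ... */
		flush_tlb_range(vma, start, end);
		dec_tlb_flush_pending(mm);
	}
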
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index b733eb404ffc..abacd5484bc0 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -39,6 +39,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
+#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bcd158f..7b2e31b1745a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -95,17 +95,6 @@ struct mmu_notifier_ops {
pte_t pte);
/*
- * Before this is invoked any secondary MMU is still ok to
- * read/write to the page previously pointed to by the Linux
- * pte because the page hasn't been freed yet and it won't be
- * freed until this returns. If required set_page_dirty has to
- * be called internally to this method.
- */
- void (*invalidate_page)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
* locks protecting the reverse maps are held. If the subsystem
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_page(mm, address);
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fc14b8b3f6ce..e7e92c8f4883 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -770,8 +770,7 @@ static inline bool is_dev_zone(const struct zone *zone)
#include <linux/memory_hotplug.h>
-extern struct mutex zonelists_mutex;
-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
@@ -896,7 +895,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
extern int numa_zonelist_order_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
+#define NUMA_ZONELIST_ORDER_LEN 16
#ifndef CONFIG_NEED_MULTIPLE_NODES
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3f74ef2281e8..694cebb50f72 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -674,8 +674,6 @@ struct ulpi_device_id {
* struct fsl_mc_device_id - MC object device identifier
* @vendor: vendor ID
* @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
*
* Type of entries in the "device Id" table for MC object devices supported by
* a MC object device driver. The last entry of the table has vendor set to 0x0
diff --git a/include/linux/module.h b/include/linux/module.h
index 8eb9a1e693e5..e7bdd549e527 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -45,7 +45,7 @@ struct module_kobject {
struct kobject *drivers_dir;
struct module_param_attrs *mp;
struct completion *kobj_completion;
-};
+} __randomize_layout;
struct module_attribute {
struct attribute attr;
@@ -475,7 +475,7 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-} ____cacheline_aligned;
+} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 8e0352af06b7..1ce85e6fd95f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -67,7 +67,7 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
-};
+} __randomize_layout;
struct file; /* forward dec */
struct path;
diff --git a/include/linux/msg.h b/include/linux/msg.h
index f3f302f9c197..a001305f5a79 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -29,7 +29,7 @@ struct msg_queue {
struct list_head q_messages;
struct list_head q_receivers;
struct list_head q_senders;
-};
+} __randomize_layout;
/* Helper routines for sys_msgsnd and sys_msgrcv */
extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index df6d59201d31..80e3b562bef6 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -66,6 +66,7 @@ struct fsl_mc_msi_desc {
* @mask_pos: [PCI MSI] Mask register position
* @mask_base: [PCI MSI-X] Mask register base address
* @platform: [platform] Platform device specific msi descriptor data
+ * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 892148c448cc..5216d2eb2289 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -681,10 +681,10 @@ struct nand_buffers {
* @tWW_min: WP# transition to WE# low
*/
struct nand_sdr_timings {
- u32 tBERS_max;
+ u64 tBERS_max;
u32 tCCS_min;
- u32 tPROG_max;
- u32 tR_max;
+ u64 tPROG_max;
+ u64 tR_max;
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5577e1b773c4..ea96d4c82be7 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -13,6 +13,8 @@
#ifndef _LINUX_MUX_CONSUMER_H
#define _LINUX_MUX_CONSUMER_H
+#include <linux/compiler.h>
+
struct device;
struct mux_control;
diff --git a/include/linux/net.h b/include/linux/net.h
index dda2cc939a53..d97d80d7fdf8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -37,7 +37,7 @@ struct net;
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
@@ -190,8 +190,16 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+
+ /* The following functions are called internally by the kernel with
+ * sock lock already held.
+ */
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+ int (*sendpage_locked)(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+ int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
+ size_t size);
};
#define DECLARE_SOCKADDR(type, dst, src) \
@@ -279,6 +287,8 @@ do { \
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
+int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
@@ -297,6 +307,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
unsigned int optlen);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
+int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1d4737cffc71..dc8b4896b77b 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -36,7 +36,6 @@ enum {
/**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
= NETIF_F_GSO_SHIFT,
- NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */
@@ -76,6 +75,7 @@ enum {
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
+ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
/*
* Add your fresh new feature above and remember to update
@@ -118,7 +118,6 @@ enum {
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
#define NETIF_F_TSO __NETIF_F(TSO)
-#define NETIF_F_UFO __NETIF_F(UFO)
#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
#define NETIF_F_RXALL __NETIF_F(RXALL)
@@ -140,6 +139,7 @@ enum {
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
+#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
@@ -172,7 +172,7 @@ enum {
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
NETIF_F_GSO_SCTP)
/*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 779b23595596..f535779d9dc1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -35,7 +35,6 @@
#include <linux/percpu.h>
#include <linux/rculist.h>
-#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
@@ -66,6 +65,7 @@ struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
+struct xdp_buff;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -693,10 +693,9 @@ struct netdev_rx_queue {
*/
struct rx_queue_attribute {
struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
ssize_t (*store)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, const char *buf, size_t len);
+ const char *buf, size_t len);
};
#ifdef CONFIG_XPS
@@ -770,31 +769,14 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
-/* These structures hold the attributes of qdisc and classifiers
- * that are being passed to the netdevice through the setup_tc op.
- */
-enum {
+enum tc_setup_type {
TC_SETUP_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
- TC_SETUP_MATCHALL,
+ TC_SETUP_CLSMATCHALL,
TC_SETUP_CLSBPF,
};
-struct tc_cls_u32_offload;
-
-struct tc_to_netdev {
- unsigned int type;
- union {
- struct tc_cls_u32_offload *cls_u32;
- struct tc_cls_flower_offload *cls_flower;
- struct tc_cls_matchall_offload *cls_mall;
- struct tc_cls_bpf_offload *cls_bpf;
- struct tc_mqprio_qopt *mqprio;
- };
- bool egress_dev;
-};
-
/* These structures hold the attributes of xdp state that are being passed
* to the netdevice through the xdp op.
*/
@@ -977,8 +959,8 @@ struct xfrmdev_ops {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index,
- * __be16 protocol, struct tc_to_netdev *tc);
+ * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
+ * void *type_data);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
* tx queues stopped. This allows the netdevice to perform queue
@@ -1138,7 +1120,12 @@ struct xfrmdev_ops {
* int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
* This function is used to set or query state related to XDP on the
* netdevice. See definition of enum xdp_netdev_command for details.
- *
+ * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
+ * This function is used to submit an XDP packet for transmit on a
+ * netdevice.
+ * void (*ndo_xdp_flush)(struct net_device *dev);
+ * This function is used to inform the driver to flush a particular
+ * xdp tx queue. Must be called on the same CPU as xdp_xmit.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1221,9 +1208,8 @@ struct net_device_ops {
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
- u32 handle, u32 chain_index,
- __be16 protocol,
- struct tc_to_netdev *tc);
+ enum tc_setup_type type,
+ void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1323,6 +1309,9 @@ struct net_device_ops {
int needed_headroom);
int (*ndo_xdp)(struct net_device *dev,
struct netdev_xdp *xdp);
+ int (*ndo_xdp_xmit)(struct net_device *dev,
+ struct xdp_buff *xdp);
+ void (*ndo_xdp_flush)(struct net_device *dev);
};
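/*
 * Illustrative sketch (not part of this patch): a driver-side ndo_setup_tc
 * under the new calling convention, dispatching on enum tc_setup_type and
 * interpreting type_data per offload type. All foo_* names are hypothetical.
 */
static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_SETUP_MQPRIO:
		return foo_setup_mqprio(dev, type_data);	/* struct tc_mqprio_qopt * */
	case TC_SETUP_CLSFLOWER:
		return foo_setup_flower(dev, type_data);	/* struct tc_cls_flower_offload * */
	default:
		return -EOPNOTSUPP;	/* unhandled offload types */
	}
}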
/**
@@ -1802,7 +1791,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct nf_hook_entry __rcu *nf_hooks_ingress;
+ struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -2308,6 +2297,7 @@ struct netdev_lag_lower_state_info {
#define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
+#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
int register_netdevice_notifier(struct notifier_block *nb);
@@ -2423,8 +2413,8 @@ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
-int dev_close(struct net_device *dev);
-int dev_close_many(struct list_head *head, bool unlink);
+void dev_close(struct net_device *dev);
+void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
@@ -2774,7 +2764,7 @@ struct softnet_data {
unsigned int input_queue_head ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
- struct call_single_data csd ____cacheline_aligned_in_smp;
+ call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;
@@ -3251,6 +3241,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
+void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
@@ -3866,6 +3858,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
@@ -4019,22 +4013,22 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
return rc;
}
-int netdev_class_create_file_ns(struct class_attribute *class_attr,
+int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns);
-void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(struct class_attribute *class_attr)
+static inline int netdev_class_create_file(const struct class_attribute *class_attr)
{
return netdev_class_create_file_ns(class_attr, NULL);
}
-static inline void netdev_class_remove_file(struct class_attribute *class_attr)
+static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
{
netdev_class_remove_file_ns(class_attr, NULL);
}
-extern struct kobj_ns_type_operations net_ns_type_operations;
+extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
@@ -4089,7 +4083,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index a4b97be30b28..f84bca1703cd 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -61,8 +61,6 @@ typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
-
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
@@ -74,25 +72,32 @@ struct nf_hook_ops {
};
struct nf_hook_entry {
- struct nf_hook_entry __rcu *next;
nf_hookfn *hook;
void *priv;
- const struct nf_hook_ops *orig_ops;
};
-static inline void
-nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
-{
- entry->next = NULL;
- entry->hook = ops->hook;
- entry->priv = ops->priv;
- entry->orig_ops = ops;
-}
+struct nf_hook_entries {
+ u16 num_hook_entries;
+ /* padding */
+ struct nf_hook_entry hooks[];
+
+ /* trailer: pointers to original orig_ops of each hook.
+ *
+ * This is not part of struct nf_hook_entry since it's only
+ * needed in the slow path (hook register/unregister).
+ *
+ * const struct nf_hook_ops *orig_ops[]
+ */
+};
-static inline int
-nf_hook_entry_priority(const struct nf_hook_entry *entry)
+static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
- return entry->orig_ops->priority;
+ unsigned int n = e->num_hook_entries;
+ const void *hook_end;
+
+ hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */
+
+ return (struct nf_hook_ops **)hook_end;
}
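/*
 * Sketch only (not in this patch): walking the slow-path trailer that keeps
 * the original nf_hook_ops pointers behind hooks[num_hook_entries].
 */
static void example_dump_hook_priorities(const struct nf_hook_entries *e)
{
	struct nf_hook_ops **ops = nf_hook_entries_get_hook_ops(e);
	unsigned int i;

	for (i = 0; i < e->num_hook_entries; i++)
		pr_info("hook %u: priority %d\n", i, ops[i]->priority);
}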
static inline int
@@ -102,12 +107,6 @@ nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
return entry->hook(entry->priv, skb, state);
}
-static inline const struct nf_hook_ops *
-nf_hook_entry_ops(const struct nf_hook_entry *entry)
-{
- return entry->orig_ops;
-}
-
static inline void nf_hook_state_init(struct nf_hook_state *p,
unsigned int hook,
u_int8_t pf,
@@ -160,13 +159,6 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
-int nf_register_hook(struct nf_hook_ops *reg);
-void nf_unregister_hook(struct nf_hook_ops *reg);
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
@@ -177,7 +169,7 @@ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
- struct nf_hook_entry *entry);
+ const struct nf_hook_entries *e, unsigned int i);
/**
* nf_hook - call a netfilter hook
@@ -191,7 +183,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct nf_hook_entry *hook_head;
+ struct nf_hook_entries *hook_head;
int ret = 1;
#ifdef HAVE_JUMP_LABEL
@@ -209,7 +201,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
nf_hook_state_init(&state, hook, pf, indev, outdev,
sk, net, okfn);
- ret = nf_hook_slow(skb, &state, hook_head);
+ ret = nf_hook_slow(skb, &state, hook_head, 0);
}
rcu_read_unlock();
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
index 074790c0cf74..0fc458bde80b 100644
--- a/include/linux/netfilter/xt_hashlimit.h
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -5,5 +5,6 @@
#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
- XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES)
+ XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
+ XT_HASHLIMIT_RATE_MATCH)
#endif /*_XT_HASHLIMIT_H*/
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 59476061de86..8d5dae1e2ff8 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -17,7 +17,7 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
- struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
int ret;
@@ -30,7 +30,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
nf_hook_state_init(&state, NF_NETDEV_INGRESS,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e);
+ ret = nf_hook_slow(skb, &state, e, 0);
if (ret == 0)
return -1;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index e52cc55ac300..5cc91d6381a3 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -51,7 +51,7 @@ struct nfs_access_entry {
struct list_head lru;
unsigned long jiffies;
struct rpc_cred * cred;
- int mask;
+ __u32 mask;
struct rcu_head rcu_head;
};
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ca3bcc4ed4e5..62cbcb842f99 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1235,7 +1235,7 @@ struct nfs41_state_protection {
struct nfs41_exchange_id_args {
struct nfs_client *client;
- nfs4_verifier *verifier;
+ nfs4_verifier verifier;
u32 flags;
struct nfs41_state_protection state_protect;
};
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 8aa01fd859fb..a36abe2da13e 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+ defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..2591878c1d48 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
+ * @defer_rcv: Called by the transport to signal the LLDD that it has
+ * begun processing of a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
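/*
 * Hypothetical LLDD glue (not part of this patch): a defer_rcv() handler that
 * reposts the receive buffer tied to the now-deferred command. The foo_*
 * names and structure layout are invented for illustration.
 */
static void foo_nvmet_fc_defer_rcv(struct nvmet_fc_target_port *tgtport,
				   struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct foo_cmd *cmd = container_of(fcpreq, struct foo_cmd, tgt_fcp_req);

	/* the transport has taken over the CMD IU; the rcv buffer may be reused */
	foo_repost_rcv_buffer(cmd);
}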
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 21c37e39e41a..36cca93a5ff2 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -334,5 +334,24 @@ struct fcnvme_ls_disconnect_acc {
#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+/*
+ * TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>".
+ * The string is allowed to be specified with or without a "0x" prefix
+ * in front of the <16hexdigits>. Without is considered the "min" string
+ * and with is considered the "max" string. The hexdigits may be upper
+ * or lower case.
+ */
+#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
+#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
+#define NVME_FC_TRADDR_HEXNAMELEN 16
+#define NVME_FC_TRADDR_MINLENGTH \
+ (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MAXLENGTH \
+ (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MIN_PN_OFFSET \
+ (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+#define NVME_FC_TRADDR_MAX_PN_OFFSET \
+ (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+
#endif /* _NVME_FC_H */
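/*
 * Hedged example (not from this patch): a plausibility check on the traddr
 * length using the new macros; real parsing also validates the "nn-"/"pn-"
 * markers and the hex digits themselves. nvme_fc_traddr_len_ok() is invented.
 */
static inline bool nvme_fc_traddr_len_ok(size_t len)
{
	/* 39 bytes without "0x" prefixes, 43 bytes with them */
	return len == NVME_FC_TRADDR_MINLENGTH ||
	       len == NVME_FC_TRADDR_MAXLENGTH;
}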
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 6b8ee9e628e1..8efff888bd9b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -254,7 +254,7 @@ enum {
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
- NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
};
struct nvme_lbaf {
@@ -963,14 +963,14 @@ struct nvme_dbbuf {
};
struct streams_directive_params {
- __u16 msl;
- __u16 nssa;
- __u16 nsso;
+ __le16 msl;
+ __le16 nssa;
+ __le16 nsso;
__u8 rsvd[10];
- __u32 sws;
- __u16 sgs;
- __u16 nsa;
- __u16 nso;
+ __le32 sws;
+ __le16 sgs;
+ __le16 nsa;
+ __le16 nso;
__u8 rsvd2[6];
};
@@ -1006,7 +1006,7 @@ static inline bool nvme_is_write(struct nvme_command *cmd)
* Why can't we simply have a Fabrics In and Fabrics out command?
*/
if (unlikely(cmd->common.opcode == nvme_fabrics_command))
- return cmd->fabrics.opcode & 1;
+ return cmd->fabrics.fctype & 1;
return cmd->common.opcode & 1;
}
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index c2256d746543..4e85447f7860 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -12,6 +12,9 @@
#ifndef _LINUX_NVMEM_CONSUMER_H
#define _LINUX_NVMEM_CONSUMER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+
struct device;
struct device_node;
/* consumer cookie */
@@ -35,6 +38,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -85,6 +89,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -ENOSYS;
}
+static inline int nvmem_cell_read_u32(struct device *dev,
+ const char *cell_id, u32 *val)
+{
+ return -ENOSYS;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/of.h b/include/linux/of.h
index 4a8a70916237..cfc34117fc92 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -104,7 +104,6 @@ extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
kobject_init(&node->kobj, &of_node_ktype);
- node->fwnode.type = FWNODE_OF;
node->fwnode.ops = &of_fwnode_ops;
}
@@ -152,7 +151,7 @@ void of_core_init(void);
static inline bool is_of_node(const struct fwnode_handle *fwnode)
{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops;
}
#define to_of_node(__fwnode) \
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 8a266e2be5a6..76aac4ce39bc 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -6,6 +6,8 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
@@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space which is reflected by MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose the content
+ * and could cause a memory corruption (zero pages instead of the
+ * original content).
+ *
+ * User should call this before establishing a page table entry for
+ * a !shared mapping and under the proper page table lock.
+ *
+ * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
+ */
+static inline int check_stable_address_space(struct mm_struct *mm)
+{
+ if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ return VM_FAULT_SIGBUS;
+ return 0;
+}
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
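/*
 * Usage sketch (not part of this patch): a fault path consults
 * check_stable_address_space() under the page table lock before installing
 * a private PTE. do_example_anon_fault() is a hypothetical caller.
 */
static int do_example_anon_fault(struct vm_fault *vmf)
{
	int ret;

	/* page table lock is held here */
	ret = check_stable_address_space(vmf->vma->vm_mm);
	if (ret)
		return ret;	/* VM_FAULT_SIGBUS: the oom reaper unmapped us */

	/* safe to set_pte_at() for this !shared mapping */
	return 0;
}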
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d33e3280c8ad..ba2d470d2d0a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -303,8 +303,8 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
* Only test-and-set exist for PG_writeback. The unconditional operators are
* risky: they bypass page accounting.
*/
-TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
- TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
+TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
+ TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index baa9344dcd10..5bbd6780f205 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -163,8 +163,6 @@ void release_pages(struct page **pages, int nr, bool cold);
*/
static inline int page_cache_get_speculative(struct page *page)
{
- VM_BUG_ON(in_interrupt());
-
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
@@ -355,8 +353,16 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
unsigned int nr_entries, struct page **entries,
pgoff_t *indices);
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, unsigned int nr_pages,
+ struct page **pages);
+static inline unsigned find_get_pages(struct address_space *mapping,
+ pgoff_t *start, unsigned int nr_pages,
+ struct page **pages)
+{
+ return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
+ pages);
+}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391b4540..4dcd5506f1ed 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -27,8 +27,16 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
pgoff_t start, unsigned nr_entries,
pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t start, unsigned nr_pages);
+unsigned pagevec_lookup_range(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t *start, pgoff_t end);
+static inline unsigned pagevec_lookup(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t *start)
+{
+ return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
+}
+
unsigned pagevec_lookup_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, int tag,
unsigned nr_pages);
diff --git a/include/linux/path.h b/include/linux/path.h
index d1372186f431..cde895cc4af4 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -7,7 +7,7 @@ struct vfsmount;
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
-};
+} __randomize_layout;
extern void path_get(const struct path *);
extern void path_put(const struct path *);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4869e66dd659..da05e5db06ac 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -188,6 +188,8 @@ enum pci_dev_flags {
* the direct_complete optimization.
*/
PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+ /* Don't use Relaxed Ordering for TLPs directed at this device */
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -729,6 +731,7 @@ struct pci_driver {
void (*shutdown) (struct pci_dev *dev);
int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
const struct pci_error_handlers *err_handler;
+ const struct attribute_group **groups;
struct device_driver driver;
struct pci_dynids dynids;
};
@@ -1067,6 +1070,7 @@ void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);
@@ -1125,6 +1129,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 491b3f5a5f8a..6a5fb939d3e5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -21,6 +21,25 @@
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+/* minimum allocation size and shift in bytes */
+#define PCPU_MIN_ALLOC_SHIFT 2
+#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT)
+
+/* number of bits per page, used to trigger a scan if blocks are > PAGE_SIZE */
+#define PCPU_BITS_PER_PAGE (PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT)
+
+/*
+ * This determines the size of each metadata block. There are several subtle
+ * constraints around this constant. The reserved region must be a multiple of
+ * PCPU_BITMAP_BLOCK_SIZE. Additionally, PCPU_BITMAP_BLOCK_SIZE must be a
+ * multiple of PAGE_SIZE or PAGE_SIZE must be a multiple of
+ * PCPU_BITMAP_BLOCK_SIZE to align with the populated page map. The unit_size
+ * also has to be a multiple of PCPU_BITMAP_BLOCK_SIZE to ensure full blocks.
+ */
+#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE
+#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
+ PCPU_MIN_ALLOC_SHIFT)
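/*
 * Worked example (illustrative, assuming 4 KiB pages): PCPU_BITMAP_BLOCK_SIZE
 * is then 4096 bytes and PCPU_BITMAP_BLOCK_BITS is 4096 >> 2 = 1024, i.e. one
 * allocation-map bit per PCPU_MIN_ALLOC_SIZE (4 byte) unit of the block.
 */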
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
@@ -116,7 +135,6 @@ extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __init percpu_init_late(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 1360dd6d5e61..af0f44effd44 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,10 +24,14 @@
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
+ *
+ * @irq_flags: if non-zero, these flags will be passed to request_irq
+ * when requesting interrupts for this PMU device.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
+ unsigned long irq_flags;
};
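/*
 * Hypothetical platform data (not in this patch) showing irq_flags being
 * forwarded to request_irq() when the PMU interrupt is requested.
 */
static struct arm_pmu_platdata foo_pmu_platdata = {
	.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
};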
#ifdef CONFIG_ARM_PMU
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a3b873fc59e4..8e22f24ded6a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -139,17 +139,6 @@ struct hw_perf_event {
/* for tp_event->class */
struct list_head tp_list;
};
- struct { /* intel_cqm */
- int cqm_state;
- u32 cqm_rmid;
- int is_group_event;
- struct list_head cqm_events_entry;
- struct list_head cqm_groups_entry;
- struct list_head cqm_group_entry;
- };
- struct { /* itrace */
- int itrace_started;
- };
struct { /* amd_power */
u64 pwr_acc;
u64 ptsc;
@@ -310,8 +299,8 @@ struct pmu {
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
- void (*event_mapped) (struct perf_event *event); /*optional*/
- void (*event_unmapped) (struct perf_event *event); /*optional*/
+ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
@@ -417,11 +406,6 @@ struct pmu {
/*
- * Return the count value for a counter.
- */
- u64 (*count) (struct perf_event *event); /*optional*/
-
- /*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (int cpu, void **pages,
@@ -541,6 +525,7 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
+#define PERF_ATTACH_ITRACE 0x10
struct perf_cgroup;
struct ring_buffer;
@@ -864,6 +849,7 @@ extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
+extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -944,6 +930,8 @@ struct perf_sample_data {
struct perf_regs regs_intr;
u64 stack_user_size;
+
+ u64 phys_addr;
} ____cacheline_aligned;
/* default value for data source */
@@ -1111,11 +1099,6 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
__perf_event_task_sched_out(prev, next);
}
-static inline u64 __perf_event_count(struct perf_event *event)
-{
- return local64_read(&event->count) + atomic64_read(&event->child_count);
-}
-
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1201,7 +1184,7 @@ extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
- struct task_struct *task);
+ struct task_struct *task, struct perf_event *event);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2a9567bb8186..d78cd01ea513 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -182,6 +182,7 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_ADDR_C45 (1<<30)
struct device;
+struct phylink;
struct sk_buff;
/*
@@ -469,11 +470,13 @@ struct phy_device {
struct mutex lock;
+ struct phylink *phylink;
struct net_device *attached_dev;
u8 mdix;
u8 mdix_ctrl;
+ void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
@@ -667,6 +670,24 @@ struct phy_fixup {
int (*run)(struct phy_device *phydev);
};
+const char *phy_speed_to_str(int speed);
+const char *phy_duplex_to_str(unsigned int duplex);
+
+/* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
+struct phy_setting {
+ u32 speed;
+ u8 duplex;
+ u8 bit;
+};
+
+const struct phy_setting *
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
+ size_t maxbit, bool exact);
+size_t phy_speeds(unsigned int *speeds, size_t size,
+ unsigned long *mask, size_t maxbit);
+
/**
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -830,7 +851,7 @@ static inline int phy_read_status(struct phy_device *phydev)
dev_err(&_phydev->mdio.dev, format, ##args)
#define phydev_dbg(_phydev, format, args...) \
- dev_dbg(&_phydev->mdio.dev, format, ##args);
+ dev_dbg(&_phydev->mdio.dev, format, ##args)
static inline const char *phydev_name(const struct phy_device *phydev)
{
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 78bb0d7f6b11..e694d4008c4a 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -27,6 +27,8 @@ enum phy_mode {
PHY_MODE_USB_HOST,
PHY_MODE_USB_DEVICE,
PHY_MODE_USB_OTG,
+ PHY_MODE_SGMII,
+ PHY_MODE_10GKR,
};
/**
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
new file mode 100644
index 000000000000..af67edd4ae38
--- /dev/null
+++ b/include/linux/phylink.h
@@ -0,0 +1,148 @@
+#ifndef NETDEV_PCS_H
+#define NETDEV_PCS_H
+
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct device_node;
+struct ethtool_cmd;
+struct net_device;
+
+enum {
+ MLO_PAUSE_NONE,
+ MLO_PAUSE_ASYM = BIT(0),
+ MLO_PAUSE_SYM = BIT(1),
+ MLO_PAUSE_RX = BIT(2),
+ MLO_PAUSE_TX = BIT(3),
+ MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX,
+ MLO_PAUSE_AN = BIT(4),
+
+ MLO_AN_PHY = 0, /* Conventional PHY */
+ MLO_AN_FIXED, /* Fixed-link mode */
+ MLO_AN_SGMII, /* Cisco SGMII protocol */
+ MLO_AN_8023Z, /* 1000base-X protocol */
+};
+
+static inline bool phylink_autoneg_inband(unsigned int mode)
+{
+ return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+}
+
+struct phylink_link_state {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ phy_interface_t interface; /* PHY_INTERFACE_xxx */
+ int speed;
+ int duplex;
+ int pause;
+ unsigned int link:1;
+ unsigned int an_enabled:1;
+ unsigned int an_complete:1;
+};
+
+struct phylink_mac_ops {
+ /**
+ * validate: validate and update the link configuration
+ * @ndev: net_device structure associated with MAC
+ * @config: configuration to validate
+ *
+ * Update the %config->supported and %config->advertised masks
+ * clearing bits that cannot be supported.
+ *
+ * Note: the PHY may be able to transform from one connection
+ * technology to another, so, e.g., don't clear 1000BaseX just
+ * because the MAC is unable to support it. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on %config->advertised and/or %config->speed.
+ */
+ void (*validate)(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state);
+
+ /* Read the current link state from the hardware */
+ int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
+
+ /* Configure the MAC */
+ /**
+ * mac_config: configure the MAC for the selected mode and state
+ * @ndev: net_device structure for the MAC
+ * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
+ * @state: state structure
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ * set the specified speed, duplex, pause mode, and phy interface
+ * mode in the provided @state.
+ * %MLO_AN_8023Z:
+ * place the link in 1000base-X mode, advertising the parameters
+ * given in advertising in @state.
+ * %MLO_AN_SGMII:
+ * place the link in Cisco SGMII mode - there is no advertisement
+ * to make as the PHY communicates the speed and duplex to the
+ * MAC over the in-band control word. Configuration of the pause
+ * mode is as per MLO_AN_PHY since this is not included.
+ */
+ void (*mac_config)(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state);
+
+ /**
+ * mac_an_restart: restart 802.3z BaseX autonegotiation
+ * @ndev: net_device structure for the MAC
+ */
+ void (*mac_an_restart)(struct net_device *ndev);
+
+ void (*mac_link_down)(struct net_device *, unsigned int mode);
+ void (*mac_link_up)(struct net_device *, unsigned int mode,
+ struct phy_device *);
+};
+
+struct phylink *phylink_create(struct net_device *, struct device_node *,
+ phy_interface_t iface, const struct phylink_mac_ops *ops);
+void phylink_destroy(struct phylink *);
+
+int phylink_connect_phy(struct phylink *, struct phy_device *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *);
+void phylink_disconnect_phy(struct phylink *);
+
+void phylink_mac_change(struct phylink *, bool up);
+
+void phylink_start(struct phylink *);
+void phylink_stop(struct phylink *);
+
+void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
+int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
+
+int phylink_ethtool_ksettings_get(struct phylink *,
+ struct ethtool_link_ksettings *);
+int phylink_ethtool_ksettings_set(struct phylink *,
+ const struct ethtool_link_ksettings *);
+int phylink_ethtool_nway_reset(struct phylink *);
+void phylink_ethtool_get_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_ethtool_set_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
+int phylink_ethtool_get_module_eeprom(struct phylink *,
+ struct ethtool_eeprom *, u8 *);
+int phylink_init_eee(struct phylink *, bool);
+int phylink_get_eee_err(struct phylink *);
+int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+
+#define phylink_zero(bm) \
+ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
+#define __phylink_do_bit(op, bm, mode) \
+ op(ETHTOOL_LINK_MODE_ ## mode ## _BIT, bm)
+
+#define phylink_set(bm, mode) __phylink_do_bit(__set_bit, bm, mode)
+#define phylink_clear(bm, mode) __phylink_do_bit(__clear_bit, bm, mode)
+#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
+
+void phylink_set_port_modes(unsigned long *bits);
+
+#endif
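/*
 * Hedged usage sketch (not part of this header): a minimal phylink_mac_ops
 * table and phylink_create() call, assuming the usual ERR_PTR() convention on
 * failure. Every foo_* symbol stands in for a real MAC driver implementation.
 */
static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= foo_validate,
	.mac_link_state	= foo_mac_link_state,
	.mac_config	= foo_mac_config,
	.mac_an_restart	= foo_mac_an_restart,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

static int foo_probe_phylink(struct net_device *ndev, struct device_node *np)
{
	struct phylink *pl;

	pl = phylink_create(ndev, np, PHY_INTERFACE_MODE_SGMII,
			    &foo_phylink_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* stash pl in driver private data; call phylink_start() on open */
	return 0;
}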
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 4d179316e431..719582744a2e 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -8,7 +8,9 @@ enum pid_type
PIDTYPE_PID,
PIDTYPE_PGID,
PIDTYPE_SID,
- PIDTYPE_MAX
+ PIDTYPE_MAX,
+ /* only valid to __task_pid_nr_ns() */
+ __PIDTYPE_TGID
};
/*
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c2a989dee876..b09136f88cf4 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -52,7 +52,7 @@ struct pid_namespace {
int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-};
+} __randomize_layout;
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e5b1716f98cc..7fa5d87190c2 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -152,12 +152,12 @@ struct pinctrl_map {
#ifdef CONFIG_PINCTRL
-extern int pinctrl_register_mappings(struct pinctrl_map const *map,
+extern int pinctrl_register_mappings(const struct pinctrl_map *map,
unsigned num_maps);
extern void pinctrl_provide_dummies(void);
#else
-static inline int pinctrl_register_mappings(struct pinctrl_map const *map,
+static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
unsigned num_maps)
{
return 0;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 231d3075815a..5d8bc7f21c2a 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,11 +81,12 @@
* it.
* @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
* value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/pinctrl.txt, section
- * "GPIO mode pitfalls" for a discussion around this parameter.)
+ * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
* slew rate to use.
@@ -114,6 +115,7 @@ enum pin_config_param {
PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 8e981be2e2c2..0ff1e0dba720 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -55,9 +55,6 @@ struct omap_hsmmc_platform_data {
u32 caps; /* Used for the MMC driver on 2430 and later */
u32 pm_caps; /* PM capabilities of the mmc */
- /* use the internal clock */
- unsigned internal_clock:1;
-
/* nonremovable e.g. eMMC */
unsigned nonremovable:1;
@@ -73,13 +70,6 @@ struct omap_hsmmc_platform_data {
int gpio_cd; /* gpio (card detect) */
int gpio_cod; /* gpio (cover detect) */
int gpio_wp; /* gpio (write protect) */
-
- int (*set_power)(struct device *dev, int power_on, int vdd);
- void (*remux)(struct device *dev, int power_on);
- /* Call back before enabling / disabling regulators */
- void (*before_set_reg)(struct device *dev, int power_on, int vdd);
- /* Call back after enabling / disabling regulators */
- void (*after_set_reg)(struct device *dev, int power_on, int vdd);
/* if we have special card, init it using this callback */
void (*init_card)(struct mmc_card *card);
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
new file mode 100644
index 000000000000..8a5f9f0b2c52
--- /dev/null
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -0,0 +1,13 @@
+#ifndef __MDIO_BCM_UNIMAC_PDATA_H
+#define __MDIO_BCM_UNIMAC_PDATA_H
+
+struct unimac_mdio_pdata {
+ u32 phy_mask;
+ int (*wait_func)(void *data);
+ void *wait_func_data;
+ const char *bus_name;
+};
+
+#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
+
+#endif /* __MDIO_BCM_UNIMAC_PDATA_H */
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
deleted file mode 100644
index f4e4a237ebd2..000000000000
--- a/include/linux/platform_data/omap_drm.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * DRM/KMS platform data for TI OMAP platforms
- *
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __PLATFORM_DATA_OMAP_DRM_H__
-#define __PLATFORM_DATA_OMAP_DRM_H__
-
-/*
- * Optional platform data to configure the default configuration of which
- * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
- * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
- * one manager, with priority given to managers that are connected to
- * detected devices. Remaining overlays are used as video planes. This
- * should be a good default behavior for most cases, but yet there still
- * might be times when you wish to do something different.
- */
-struct omap_kms_platform_data {
- /* overlays to use as CRTCs: */
- int ovl_cnt;
- const int *ovl_ids;
-
- /* overlays to use as video planes: */
- int pln_cnt;
- const int *pln_ids;
-
- int mgr_cnt;
- const int *mgr_ids;
-
- int dev_cnt;
- const char **dev_names;
-};
-
-struct omap_drm_platform_data {
- uint32_t omaprev;
- struct omap_kms_platform_data *kms_pdata;
-};
-
-#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 79b0e4cdb814..f8274b0c6888 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -17,10 +17,12 @@
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
* @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
+ bool spi_3wire;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_data/x86/apple.h b/include/linux/platform_data/x86/apple.h
new file mode 100644
index 000000000000..079e816c3c21
--- /dev/null
+++ b/include/linux/platform_data/x86/apple.h
@@ -0,0 +1,13 @@
+#ifndef PLATFORM_DATA_X86_APPLE_H
+#define PLATFORM_DATA_X86_APPLE_H
+
+#ifdef CONFIG_X86
+/**
+ * x86_apple_machine - whether the machine is an x86 Apple Macintosh
+ */
+extern bool x86_apple_machine;
+#else
+#define x86_apple_machine false
+#endif
+
+#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index b8b4df09fd8f..47ded8aa8a5d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -689,6 +689,8 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
+extern void dpm_noirq_resume_devices(pm_message_t state);
+extern void dpm_noirq_end(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
@@ -697,6 +699,8 @@ extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
+extern void dpm_noirq_begin(void);
+extern int dpm_noirq_suspend_devices(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 41004d97cefa..84f423d5633e 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -43,6 +43,7 @@ struct genpd_power_state {
s64 power_on_latency_ns;
s64 residency_ns;
struct fwnode_handle *fwnode;
+ ktime_t idle_time;
};
struct genpd_lock_ops;
@@ -78,6 +79,8 @@ struct generic_pm_domain {
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
void *free; /* Free the state that was allocated for default */
+ ktime_t on_time;
+ ktime_t accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 58ab28d81fc2..06844b54dfc1 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -21,7 +21,7 @@ struct proc_ns_operations {
int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
struct user_namespace *(*owner)(struct ns_common *ns);
struct ns_common *(*get_parent)(struct ns_common *ns);
-};
+} __randomize_layout;
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
diff --git a/include/linux/property.h b/include/linux/property.h
index 7e77039e6b81..6bebee13c5e0 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -51,46 +51,52 @@ int device_property_read_string(struct device *dev, const char *propname,
int device_property_match_string(struct device *dev,
const char *propname, const char *string);
-bool fwnode_device_is_available(struct fwnode_handle *fwnode);
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname);
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val,
size_t nval);
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val,
size_t nval);
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val,
size_t nval);
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval);
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val);
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string);
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
#define fwnode_for_each_child_node(fwnode, child) \
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
-struct fwnode_handle *device_get_next_child_node(struct device *dev,
- struct fwnode_handle *child);
+struct fwnode_handle *device_get_next_child_node(
+ struct device *dev, struct fwnode_handle *child);
#define device_for_each_child_node(dev, child) \
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
- const char *childname);
+struct fwnode_handle *fwnode_get_named_child_node(
+ const struct fwnode_handle *fwnode, const char *childname);
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
@@ -129,31 +135,31 @@ static inline int device_property_read_u64(struct device *dev,
return device_property_read_u64_array(dev, propname, val, 1);
}
-static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
+static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
return fwnode_property_present(fwnode, propname);
}
-static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
return fwnode_property_read_u8_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode,
const char *propname, u16 *val)
{
return fwnode_property_read_u16_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode,
const char *propname, u32 *val)
{
return fwnode_property_read_u32_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode,
const char *propname, u64 *val)
{
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
@@ -274,19 +280,20 @@ int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
- struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *fwnode);
+fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port_parent(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
- struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port, u32 endpoint);
+ const struct fwnode_handle *fwnode);
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
+ u32 endpoint);
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
#endif /* _LINUX_PROPERTY_H_ */
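A minimal sketch of a consumer of the const-qualified helpers above, assuming a hypothetical driver whose firmware node carries a "line-count" u32 property and per-child "enabled" booleans (the example_* names and the property strings are illustrative, not part of this patch):

#include <linux/property.h>

static int example_probe_props(struct device *dev)
{
        struct fwnode_handle *child;
        u32 lines;
        int ret;

        ret = device_property_read_u32(dev, "line-count", &lines);
        if (ret)
                return ret;

        device_for_each_child_node(dev, child) {
                /* the const-qualified accessors take these handles as before */
                if (!fwnode_property_present(child, "enabled"))
                        continue;
                /* ... configure one line per enabled child ... */
        }

        return 0;
}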
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..51349d124ee5 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations
+ * Driver should return delay of the next auxiliary work scheduling
+ * time (>=0) or negative value in case further scheduling
+ * is not required.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -126,6 +131,7 @@ struct ptp_clock_info {
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
+ long (*do_aux_work)(struct ptp_clock_info *ptp);
};
struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @delay: number of jiffies to wait before queuing
+ * See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+ unsigned long delay)
+{ return -EOPNOTSUPP; }
+
#endif
#endif
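A sketch of how a PHC driver might use the new auxiliary worker: set .do_aux_work in its ptp_clock_info before ptp_clock_register(), then kick the kthread with ptp_schedule_worker(). The example_phc structure and the 100 ms interval are hypothetical.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ptp_clock_kernel.h>

struct example_phc {
        struct ptp_clock *clock;
        struct ptp_clock_info info;     /* .do_aux_work = example_do_aux_work */
};

/* Runs on the PTP kthread; the return value is the delay until the next run. */
static long example_do_aux_work(struct ptp_clock_info *info)
{
        struct example_phc *phc = container_of(info, struct example_phc, info);

        /* ... poll phc's hardware: drain timestamp FIFOs, handle rollover ... */
        (void)phc;

        return msecs_to_jiffies(100);   /* a negative value stops rescheduling */
}

static void example_start_polling(struct example_phc *phc)
{
        ptp_schedule_worker(phc->clock, 0);     /* first run as soon as possible */
}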
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d8c97ec8a8e6..37b4bb2545b3 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -436,9 +436,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+ return kcalloc(size, sizeof(void *), gfp);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -582,7 +582,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+ unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
@@ -590,7 +591,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues;
int i;
- queues = kmalloc(nrings * sizeof *queues, gfp);
+ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;
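The allocation changes above are hardening rather than behaviour changes: ring sizes can be influenced from userspace, so the multiplications move into kcalloc()/kmalloc_array(), which fail cleanly on overflow instead of returning an undersized buffer. A minimal sketch of the pattern (illustrative only):

#include <linux/slab.h>

static void **example_alloc_queue(unsigned int size, gfp_t gfp)
{
        /*
         * An open-coded "size * sizeof(void *)" could wrap for huge sizes
         * (notably on 32-bit); kcalloc() checks the multiplication and
         * returns NULL on overflow instead of a short allocation.
         */
        return kcalloc(size, sizeof(void *), gfp);
}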
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 0eef0a2b1901..d60de4a39810 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -323,6 +323,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
bool en_searcher);
+ int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
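The new get_coalesce op reports the current coalescing value for a single queue identified by an opaque handle, replacing the device-wide getter removed from qed_common_ops further down. A hedged sketch of a caller; the example_* name and error handling are illustrative:

#include <linux/errno.h>
#include <linux/qed/qed_eth_if.h>

static int example_query_coalesce(struct qed_dev *cdev, void *queue_handle,
                                  u16 *coal)
{
        const struct qed_eth_ops *ops = qed_get_eth_ops();

        if (!ops || !ops->get_coalesce)
                return -EOPNOTSUPP;

        return ops->get_coalesce(cdev, coal, queue_handle);
}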
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index ef39c7f40ae6..cc646ca97974 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -161,6 +161,18 @@ enum qed_nvm_images {
QED_NVM_IMAGE_FCOE_CFG,
};
+struct qed_link_eee_params {
+ u32 tx_lpi_timer;
+#define QED_EEE_1G_ADV BIT(0)
+#define QED_EEE_10G_ADV BIT(1)
+
+ /* Capabilities are represented using QED_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
+};
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -172,8 +184,9 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
-#define QED_COALESCE_MAX 0xFF
+#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
+#define QED_DEFAULT_TX_USECS 48
/* forward */
struct qed_dev;
@@ -408,6 +421,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
@@ -422,6 +436,7 @@ struct qed_link_params {
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
u32 loopback_mode;
+ struct qed_link_eee_params eee;
};
struct qed_link_output {
@@ -437,6 +452,12 @@ struct qed_link_output {
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
+
+ /* EEE - capability & param */
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct qed_link_eee_params eee;
};
struct qed_probe_params {
@@ -654,16 +675,6 @@ struct qed_common_ops {
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief get_coalesce - Get coalesce parameters in usec
- *
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- *
- */
- void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
-
-/**
* @brief set_coalesce - Configure Rx coalesce value in usec
*
* @param cdev
@@ -674,8 +685,8 @@ struct qed_common_ops {
*
* @return 0 on success, error otherwise.
*/
- int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u16 qid, u16 sb_id);
+ int (*set_coalesce)(struct qed_dev *cdev,
+ u16 rx_coal, u16 tx_coal, void *handle);
/**
* @brief set_led - Configure LED mode
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 3e5735064b71..567ebb5eaab0 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -357,8 +357,25 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
-void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
- gfp_t, int end);
+
+void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp,
+ unsigned long max);
+static inline void __rcu **idr_get_free(struct radix_tree_root *root,
+ struct radix_tree_iter *iter,
+ gfp_t gfp,
+ int end)
+{
+ return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
+}
+
+static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
+ struct radix_tree_iter *iter,
+ gfp_t gfp,
+ unsigned long end)
+{
+ return idr_get_free_cmn(root, iter, gfp, end - 1);
+}
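Design note: idr_get_free_cmn() takes an inclusive maximum ID, so both wrappers convert their exclusive end argument by subtracting one, and idr_get_free() treats end <= 0 as "no upper bound" (INT_MAX). A tiny illustration of that mapping only; the function and the bound are hypothetical:

#include <linux/gfp.h>
#include <linux/radix-tree.h>

static void __rcu **example_grab_slot(struct radix_tree_root *root,
                                      struct radix_tree_iter *iter)
{
        /* end = 10 means IDs 0..9; passing 0 instead would allow 0..INT_MAX */
        return idr_get_free(root, iter, GFP_KERNEL, 10);
}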
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 30f945329818..583cdd3d49ca 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -121,6 +121,7 @@ extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
+extern const struct raid6_recov_calls raid6_recov_neon;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..96f1baf62ab8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,8 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
-void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
#ifdef CONFIG_PREEMPT_RCU
@@ -105,11 +103,13 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+extern int rcu_scheduler_active __read_mostly;
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
+void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -164,8 +164,6 @@ static inline void rcu_init_nohz(void) { }
* macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch_lite(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -176,10 +174,17 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
rcu_all_qs(); \
rcu_note_voluntary_context_switch_lite(t); \
} while (0)
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define call_rcu_tasks call_rcu_sched
+#define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
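With the !CONFIG_TASKS_RCU fallbacks above, code can use the Tasks-RCU API unconditionally. A hedged sketch of the classic use case, retiring a dynamically generated trampoline only after every task has passed through a voluntary context switch (the example_* structure and the kfree()-based teardown are simplifications):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_trampoline {
        struct rcu_head rcu;
        /* ... generated code a tracer may still be executing ... */
};

static void example_free_tramp(struct rcu_head *head)
{
        kfree(container_of(head, struct example_trampoline, rcu));
}

static void example_retire_tramp(struct example_trampoline *tr)
{
        /* without CONFIG_TASKS_RCU this maps to call_rcu_sched() */
        call_rcu_tasks(&tr->rcu, example_free_tramp);
}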
/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5becbbccb998..b3dbf9502fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -116,13 +116,11 @@ static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
-
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
-extern int rcu_scheduler_active __read_mostly;
+#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 591792c8e5b0..48b7c9c68c4d 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -53,6 +53,9 @@ extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
+# ifdef CONFIG_ARCH_HAS_REFCOUNT
+# include <asm/refcount.h>
+# else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
return atomic_add_unless(&r->refs, i, 0);
@@ -87,6 +90,7 @@ static inline void refcount_dec(refcount_t *r)
{
atomic_dec(&r->refs);
}
+# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
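The ARCH_HAS_REFCOUNT branch only changes how the saturation checks are generated; the refcount_t API itself is unchanged. A minimal usage sketch with a hypothetical refcounted object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
        refcount_t ref;
        /* ... payload ... */
};

static struct example_obj *example_obj_new(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->ref, 1);
        return obj;
}

static void example_obj_put(struct example_obj *obj)
{
        /* saturates and warns instead of wrapping on over/underflow */
        if (refcount_dec_and_test(&obj->ref))
                kfree(obj);
}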
diff --git a/include/linux/regulator/mt6380-regulator.h b/include/linux/regulator/mt6380-regulator.h
new file mode 100644
index 000000000000..465182da6315
--- /dev/null
+++ b/include/linux/regulator/mt6380-regulator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chenglin Xu <chenglin.xu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_mt6380_H
+#define __LINUX_REGULATOR_mt6380_H
+
+enum {
+ MT6380_ID_VCPU = 0,
+ MT6380_ID_VCORE,
+ MT6380_ID_VRF,
+ MT6380_ID_VMLDO,
+ MT6380_ID_VALDO,
+ MT6380_ID_VPHYLDO,
+ MT6380_ID_VDDRLDO,
+ MT6380_ID_VTLDO,
+ MT6380_ID_RG_MAX,
+};
+
+#define MT6380_MAX_REGULATOR MT6380_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_mt6380_H */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 156cfd330b66..21fc84d82d41 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -254,6 +254,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
unsigned *pshared_count,
struct dma_fence ***pshared);
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src);
+
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout);
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae0528b834cd..e784761a4443 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,6 +32,7 @@ struct rw_semaphore {
#define RWSEM_UNLOCKED_VALUE 0x00000000
extern void __down_read(struct rw_semaphore *sem);
+extern int __must_check __down_read_killable(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
extern int __must_check __down_write_killable(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dd1d14250340..0ad7318ff299 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -44,6 +44,7 @@ struct rw_semaphore {
};
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2ba9ec93423f..68b38335d33c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -426,7 +426,7 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
-};
+} __randomize_layout;
struct sched_dl_entity {
struct rb_node rb_node;
@@ -526,6 +526,13 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+ * scheduling-critical items should be added above here.
+ */
+ randomized_struct_fields_start
+
void *stack;
atomic_t usage;
/* Per task flags (PF_*), defined further below: */
@@ -582,9 +589,10 @@ struct task_struct {
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
- bool rcu_tasks_holdout;
- struct list_head rcu_tasks_holdout_list;
+ u8 rcu_tasks_holdout;
+ u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
+ struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
struct sched_info sched_info;
@@ -839,7 +847,17 @@ struct task_struct {
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
+#endif
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#define MAX_XHLOCKS_NR 64UL
+ struct hist_lock *xhlocks; /* Crossrelease history locks */
+ unsigned int xhlock_idx;
+ /* For restoring at history boundaries */
+ unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
+ unsigned int hist_id;
+ /* For overwrite check at each context exit */
+ unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif
#ifdef CONFIG_UBSAN
@@ -891,8 +909,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT_A
- int closid;
+#ifdef CONFIG_INTEL_RDT
+ u32 closid;
+ u32 rmid;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
@@ -1079,6 +1098,13 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+
+ /*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+ randomized_struct_fields_end
+
/* CPU-specific state of this task: */
struct thread_struct thread;
@@ -1149,13 +1175,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
return tsk->tgid;
}
-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return pid_vnr(task_tgid(tsk));
-}
-
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
@@ -1171,23 +1190,6 @@ static inline int pid_alive(const struct task_struct *p)
return p->pids[PIDTYPE_PID].pid != NULL;
}
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
@@ -1209,12 +1211,52 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
+static inline char task_state_to_char(struct task_struct *task)
+{
+ const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
+ unsigned long state = task->state;
+
+ state = state ? __ffs(state) + 1 : 0;
+
+ /* Make sure the string lines up properly with the number of task states: */
+ BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1);
+
+ return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?';
+}
+
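A short usage sketch of the new helper from a hypothetical debug dump (the message format is illustrative):

#include <linux/printk.h>
#include <linux/sched.h>

static void example_dump_task(struct task_struct *p)
{
        /* one of 'R', 'S', 'D', ... per TASK_STATE_TO_CHAR_STR, or '?' */
        pr_info("%-15s state=%c pid=%d\n",
                p->comm, task_state_to_char(p), task_pid_nr(p));
}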
/**
* is_global_init - check if a task structure is init. Since init
* is free to have sub-threads we need to check tgid.
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index e0eaee54c5a4..5d58d49e9f87 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -6,6 +6,7 @@
*/
struct task_struct;
+struct pid_namespace;
extern void dump_cpu_task(int cpu);
@@ -34,7 +35,8 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
-extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_show_task(struct task_struct *p,
+ struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2b24a6974847..3a19c253bdb1 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,12 +84,6 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
-/* same as above but performs the slow path from the async context. Can
- * be called from the atomic context as well
- */
-extern void mmput_async(struct mm_struct *);
-#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -167,6 +161,14 @@ static inline gfp_t current_gfp_context(gfp_t flags)
return flags;
}
+#ifdef CONFIG_LOCKDEP
+extern void fs_reclaim_acquire(gfp_t gfp_mask);
+extern void fs_reclaim_release(gfp_t gfp_mask);
+#else
+static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
+static inline void fs_reclaim_release(gfp_t gfp_mask) { }
+#endif
+
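fs_reclaim_acquire()/release() are lockdep annotations that model "this allocation may enter filesystem reclaim", so lock ordering against reclaim is validated even on runs where reclaim never actually happens. A hedged sketch of the intended pattern in an allocator-style slow path (illustrative; the real annotation points live in the page allocator):

#include <linux/sched/mm.h>

static void *example_slow_alloc(gfp_t gfp_mask)
{
        void *obj = NULL;

        fs_reclaim_acquire(gfp_mask);   /* may recurse into FS reclaim if __GFP_FS */
        /* ... attempt reclaim and retry the allocation here ... */
        fs_reclaim_release(gfp_mask);

        return obj;
}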
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c06d63b3a583..2a0dd40b15db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -222,7 +222,7 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
-};
+} __randomize_layout;
/*
* Bits in flags field of signal_struct.
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index c97e5f096927..79a2a744648d 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -30,7 +30,6 @@ extern int lockdep_tasklist_lock_is_held(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
-extern void init_idle_bootup_task(struct task_struct *idle);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7d065abc7a47..d7b6dab956ec 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -71,6 +71,14 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
+
+ /*
+ * Some variables from the most recent sd_lb_stats for this domain,
+ * used by wake_affine().
+ */
+ unsigned long nr_running;
+ unsigned long load;
+ unsigned long capacity;
};
struct sched_domain {
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 5d5415e129d4..3c07e4135127 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -36,7 +36,8 @@ struct user_struct {
struct hlist_node uidhash_node;
kuid_t uid;
-#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
+ defined(CONFIG_NET)
atomic_long_t locked_vm;
#endif
};
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 99e866487e2f..82b171e1aa0b 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -273,87 +273,85 @@ struct sctp_init_chunk {
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
-typedef struct sctp_ipv4addr_param {
+struct sctp_ipv4addr_param {
struct sctp_paramhdr param_hdr;
- struct in_addr addr;
-} sctp_ipv4addr_param_t;
+ struct in_addr addr;
+};
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
-typedef struct sctp_ipv6addr_param {
+struct sctp_ipv6addr_param {
struct sctp_paramhdr param_hdr;
struct in6_addr addr;
-} sctp_ipv6addr_param_t;
+};
/* Section 3.3.2.1 Cookie Preservative (9) */
-typedef struct sctp_cookie_preserve_param {
+struct sctp_cookie_preserve_param {
struct sctp_paramhdr param_hdr;
- __be32 lifespan_increment;
-} sctp_cookie_preserve_param_t;
+ __be32 lifespan_increment;
+};
/* Section 3.3.2.1 Host Name Address (11) */
-typedef struct sctp_hostname_param {
+struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
uint8_t hostname[0];
-} sctp_hostname_param_t;
+};
/* Section 3.3.2.1 Supported Address Types (12) */
-typedef struct sctp_supported_addrs_param {
+struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
__be16 types[0];
-} sctp_supported_addrs_param_t;
-
-/* Appendix A. ECN Capable (32768) */
-typedef struct sctp_ecn_capable_param {
- struct sctp_paramhdr param_hdr;
-} sctp_ecn_capable_param_t;
+};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
-typedef struct sctp_adaptation_ind_param {
+struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} sctp_adaptation_ind_param_t;
+};
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
-typedef struct sctp_supported_ext_param {
+struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} sctp_supported_ext_param_t;
+};
/* AUTH Section 3.1 Random */
-typedef struct sctp_random_param {
+struct sctp_random_param {
struct sctp_paramhdr param_hdr;
__u8 random_val[0];
-} sctp_random_param_t;
+};
/* AUTH Section 3.2 Chunk List */
-typedef struct sctp_chunks_param {
+struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} sctp_chunks_param_t;
+};
/* AUTH Section 3.3 HMAC Algorithm */
-typedef struct sctp_hmac_algo_param {
+struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
__be16 hmac_ids[0];
-} sctp_hmac_algo_param_t;
+};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
* association.
*/
-typedef struct sctp_init_chunk sctp_initack_chunk_t;
+struct sctp_initack_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_inithdr init_hdr;
+};
/* Section 3.3.3.1 State Cookie (7) */
-typedef struct sctp_cookie_param {
+struct sctp_cookie_param {
struct sctp_paramhdr p;
__u8 body[0];
-} sctp_cookie_param_t;
+};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
-typedef struct sctp_unrecognized_param {
+struct sctp_unrecognized_param {
struct sctp_paramhdr param_hdr;
struct sctp_paramhdr unrecognized;
-} sctp_unrecognized_param_t;
+};
@@ -365,30 +363,28 @@ typedef struct sctp_unrecognized_param {
* subsequences of DATA chunks as represented by their TSNs.
*/
-typedef struct sctp_gap_ack_block {
+struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} sctp_gap_ack_block_t;
-
-typedef __be32 sctp_dup_tsn_t;
+};
-typedef union {
- sctp_gap_ack_block_t gab;
- sctp_dup_tsn_t dup;
-} sctp_sack_variable_t;
+union sctp_sack_variable {
+ struct sctp_gap_ack_block gab;
+ __be32 dup;
+};
-typedef struct sctp_sackhdr {
+struct sctp_sackhdr {
__be32 cum_tsn_ack;
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- sctp_sack_variable_t variable[0];
-} sctp_sackhdr_t;
+ union sctp_sack_variable variable[0];
+};
-typedef struct sctp_sack_chunk {
+struct sctp_sack_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_sackhdr_t sack_hdr;
-} sctp_sack_chunk_t;
+ struct sctp_sackhdr sack_hdr;
+};
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -398,49 +394,49 @@ typedef struct sctp_sack_chunk {
* the present association.
*/
-typedef struct sctp_heartbeathdr {
+struct sctp_heartbeathdr {
struct sctp_paramhdr info;
-} sctp_heartbeathdr_t;
+};
-typedef struct sctp_heartbeat_chunk {
+struct sctp_heartbeat_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_heartbeathdr_t hb_hdr;
-} sctp_heartbeat_chunk_t;
+ struct sctp_heartbeathdr hb_hdr;
+};
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
*/
-typedef struct sctp_abort_chunk {
+struct sctp_abort_chunk {
struct sctp_chunkhdr uh;
-} sctp_abort_chunk_t;
+};
/* For the graceful shutdown we must carry the tag (in common header)
* and the highest consecutive acking value.
*/
-typedef struct sctp_shutdownhdr {
+struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} sctp_shutdownhdr_t;
+};
-struct sctp_shutdown_chunk_t {
+struct sctp_shutdown_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_shutdownhdr_t shutdown_hdr;
+ struct sctp_shutdownhdr shutdown_hdr;
};
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
-typedef struct sctp_errhdr {
+struct sctp_errhdr {
__be16 cause;
__be16 length;
__u8 variable[0];
-} sctp_errhdr_t;
+};
-typedef struct sctp_operr_chunk {
+struct sctp_operr_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_errhdr_t err_hdr;
-} sctp_operr_chunk_t;
+ struct sctp_errhdr err_hdr;
+};
/* RFC 2960 3.3.10 - Operation Error
*
@@ -461,7 +457,7 @@ typedef struct sctp_operr_chunk {
* 9 No User Data
* 10 Cookie Received While Shutting Down
*/
-typedef enum {
+enum sctp_error {
SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
@@ -516,33 +512,28 @@ typedef enum {
* 0x0105 Unsupported HMAC Identifier
*/
SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
-} sctp_error_t;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Explicit Congestion Notification Echo (ECNE) (12)
*/
-typedef struct sctp_ecnehdr {
+struct sctp_ecnehdr {
__be32 lowest_tsn;
-} sctp_ecnehdr_t;
+};
-typedef struct sctp_ecne_chunk {
+struct sctp_ecne_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_ecnehdr_t ence_hdr;
-} sctp_ecne_chunk_t;
+ struct sctp_ecnehdr ence_hdr;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
*/
-typedef struct sctp_cwrhdr {
+struct sctp_cwrhdr {
__be32 lowest_tsn;
-} sctp_cwrhdr_t;
-
-typedef struct sctp_cwr_chunk {
- struct sctp_chunkhdr chunk_hdr;
- sctp_cwrhdr_t cwr_hdr;
-} sctp_cwr_chunk_t;
+};
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -638,20 +629,20 @@ struct sctp_fwdtsn_chunk {
* The ASCONF Parameter Response is used in the ASCONF-ACK to
* report status of ASCONF processing.
*/
-typedef struct sctp_addip_param {
- struct sctp_paramhdr param_hdr;
- __be32 crr_id;
-} sctp_addip_param_t;
+struct sctp_addip_param {
+ struct sctp_paramhdr param_hdr;
+ __be32 crr_id;
+};
-typedef struct sctp_addiphdr {
+struct sctp_addiphdr {
__be32 serial;
__u8 params[0];
-} sctp_addiphdr_t;
+};
-typedef struct sctp_addip_chunk {
+struct sctp_addip_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_addiphdr_t addip_hdr;
-} sctp_addip_chunk_t;
+ struct sctp_addiphdr addip_hdr;
+};
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -702,16 +693,16 @@ typedef struct sctp_addip_chunk {
* HMAC: n bytes (unsigned integer) This hold the result of the HMAC
* calculation.
*/
-typedef struct sctp_authhdr {
+struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
__u8 hmac[0];
-} sctp_authhdr_t;
+};
-typedef struct sctp_auth_chunk {
+struct sctp_auth_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_authhdr_t auth_hdr;
-} sctp_auth_chunk_t;
+ struct sctp_authhdr auth_hdr;
+};
struct sctp_infox {
struct sctp_info *sctpinfo;
diff --git a/include/linux/seg6_local.h b/include/linux/seg6_local.h
new file mode 100644
index 000000000000..ee63e76fe0c7
--- /dev/null
+++ b/include/linux/seg6_local.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_LOCAL_H
+#define _LINUX_SEG6_LOCAL_H
+
+#include <uapi/linux/seg6_local.h>
+
+#endif
diff --git a/include/linux/sem.h b/include/linux/sem.h
index be5cf2ea14ad..de2deb8676bd 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -41,7 +41,7 @@ struct sem_array {
unsigned int use_global_lock;/* >0: global lock required */
struct sem sems[];
-};
+} __randomize_layout;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 61fbb440449c..a27ef5f56431 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -80,9 +80,10 @@ struct uart_8250_ops {
};
struct uart_8250_em485 {
- struct timer_list start_tx_timer; /* "rs485 start tx" timer */
- struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */
- struct timer_list *active_timer; /* pointer to active timer */
+ struct hrtimer start_tx_timer; /* "rs485 start tx" timer */
+ struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */
+ struct hrtimer *active_timer; /* pointer to active timer */
+ struct uart_8250_port *port; /* for hrtimer callbacks */
};
/*
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1775500294bb..5553e04e59c9 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -20,7 +20,7 @@
#ifndef LINUX_SERIAL_CORE_H
#define LINUX_SERIAL_CORE_H
-
+#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
@@ -144,7 +144,7 @@ struct uart_port {
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
unsigned char iotype; /* io access style */
- unsigned char unused1;
+ unsigned char quirks; /* internal quirks */
#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
@@ -155,6 +155,9 @@ struct uart_port {
#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
+ /* quirks must be updated while holding port mutex */
+#define UPQ_NO_TXEN_TEST BIT(0)
+
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
@@ -175,7 +178,6 @@ struct uart_port {
* [for bit definitions in the UPF_CHANGE_MASK]
*
* Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
- * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
@@ -192,7 +194,6 @@ struct uart_port {
#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
-#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
@@ -246,7 +247,6 @@ struct uart_port {
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
- unsigned char irq_wake;
unsigned char unused[2];
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
new file mode 100644
index 000000000000..4a906f560817
--- /dev/null
+++ b/include/linux/sfp.h
@@ -0,0 +1,434 @@
+#ifndef LINUX_SFP_H
+#define LINUX_SFP_H
+
+#include <linux/phy.h>
+
+struct __packed sfp_eeprom_base {
+ u8 phys_id;
+ u8 phys_ext_id;
+ u8 connector;
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 e10g_base_er:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_sr:1;
+ u8 if_1x_sx:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_copper_passive:1;
+
+ u8 escon_mmf_1310_led:1;
+ u8 escon_smf_1310_laser:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_short_reach:1;
+
+ u8 unallocated_5_7:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_short_reach:1;
+
+ u8 e_base_px:1;
+ u8 e_base_bx10:1;
+ u8 e100_base_fx:1;
+ u8 e100_base_lx:1;
+ u8 e1000_base_t:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_sx:1;
+
+ u8 fc_ll_v:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_m:1;
+ u8 fc_tech_sa:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_electrical_inter_enclosure:1;
+
+ u8 fc_tech_electrical_intra_enclosure:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_ll:1;
+ u8 sfp_ct_active:1;
+ u8 sfp_ct_passive:1;
+ u8 unallocated_8_1:1;
+ u8 unallocated_8_0:1;
+
+ u8 fc_media_tw:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_m5:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_sm:1;
+
+ u8 fc_speed_1200:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_200:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_100:1;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 if_1x_copper_passive:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_sx:1;
+ u8 e10g_base_sr:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_er:1;
+
+ u8 sonet_oc3_short_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 unallocated_5_7:1;
+
+ u8 sonet_oc48_short_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 escon_smf_1310_laser:1;
+ u8 escon_mmf_1310_led:1;
+
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e_base_bx10:1;
+ u8 e_base_px:1;
+
+ u8 fc_tech_electrical_inter_enclosure:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_sa:1;
+ u8 fc_ll_m:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_v:1;
+
+ u8 unallocated_8_0:1;
+ u8 unallocated_8_1:1;
+ u8 sfp_ct_passive:1;
+ u8 sfp_ct_active:1;
+ u8 fc_tech_ll:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_electrical_intra_enclosure:1;
+
+ u8 fc_media_sm:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_m5:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_tw:1;
+
+ u8 fc_speed_100:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_200:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1200:1;
+#else
+#error Unknown Endian
+#endif
+ u8 encoding;
+ u8 br_nominal;
+ u8 rate_id;
+ u8 link_len[6];
+ char vendor_name[16];
+ u8 extended_cc;
+ char vendor_oui[3];
+ char vendor_pn[16];
+ char vendor_rev[4];
+ union {
+ __be16 optical_wavelength;
+ u8 cable_spec;
+ };
+ u8 reserved62;
+ u8 cc_base;
+};
+
+struct __packed sfp_eeprom_ext {
+ __be16 options;
+ u8 br_max;
+ u8 br_min;
+ char vendor_sn[16];
+ char datecode[8];
+ u8 diagmon;
+ u8 enhopts;
+ u8 sff8472_compliance;
+ u8 cc_ext;
+};
+
+struct __packed sfp_eeprom_id {
+ struct sfp_eeprom_base base;
+ struct sfp_eeprom_ext ext;
+};
+
+/* SFP EEPROM registers */
+enum {
+ SFP_PHYS_ID = 0x00,
+ SFP_PHYS_EXT_ID = 0x01,
+ SFP_CONNECTOR = 0x02,
+ SFP_COMPLIANCE = 0x03,
+ SFP_ENCODING = 0x0b,
+ SFP_BR_NOMINAL = 0x0c,
+ SFP_RATE_ID = 0x0d,
+ SFP_LINK_LEN_SM_KM = 0x0e,
+ SFP_LINK_LEN_SM_100M = 0x0f,
+ SFP_LINK_LEN_50UM_OM2_10M = 0x10,
+ SFP_LINK_LEN_62_5UM_OM1_10M = 0x11,
+ SFP_LINK_LEN_COPPER_1M = 0x12,
+ SFP_LINK_LEN_50UM_OM4_10M = 0x12,
+ SFP_LINK_LEN_50UM_OM3_10M = 0x13,
+ SFP_VENDOR_NAME = 0x14,
+ SFP_VENDOR_OUI = 0x25,
+ SFP_VENDOR_PN = 0x28,
+ SFP_VENDOR_REV = 0x38,
+ SFP_OPTICAL_WAVELENGTH_MSB = 0x3c,
+ SFP_OPTICAL_WAVELENGTH_LSB = 0x3d,
+ SFP_CABLE_SPEC = 0x3c,
+ SFP_CC_BASE = 0x3f,
+ SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */
+ SFP_BR_MAX = 0x42,
+ SFP_BR_MIN = 0x43,
+ SFP_VENDOR_SN = 0x44,
+ SFP_DATECODE = 0x54,
+ SFP_DIAGMON = 0x5c,
+ SFP_ENHOPTS = 0x5d,
+ SFP_SFF8472_COMPLIANCE = 0x5e,
+ SFP_CC_EXT = 0x5f,
+
+ SFP_PHYS_ID_SFP = 0x03,
+ SFP_PHYS_EXT_ID_SFP = 0x04,
+ SFP_CONNECTOR_UNSPEC = 0x00,
+ /* codes 01-05 not supportable on SFP, but some modules have single SC */
+ SFP_CONNECTOR_SC = 0x01,
+ SFP_CONNECTOR_FIBERJACK = 0x06,
+ SFP_CONNECTOR_LC = 0x07,
+ SFP_CONNECTOR_MT_RJ = 0x08,
+ SFP_CONNECTOR_MU = 0x09,
+ SFP_CONNECTOR_SG = 0x0a,
+ SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
+ SFP_CONNECTOR_MPO_1X12 = 0x0c,
+ SFP_CONNECTOR_MPO_2X16 = 0x0d,
+ SFP_CONNECTOR_HSSDC_II = 0x20,
+ SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
+ SFP_CONNECTOR_RJ45 = 0x22,
+ SFP_CONNECTOR_NOSEPARATE = 0x23,
+ SFP_CONNECTOR_MXC_2X16 = 0x24,
+ SFP_ENCODING_UNSPEC = 0x00,
+ SFP_ENCODING_8B10B = 0x01,
+ SFP_ENCODING_4B5B = 0x02,
+ SFP_ENCODING_NRZ = 0x03,
+ SFP_ENCODING_8472_MANCHESTER = 0x04,
+ SFP_ENCODING_8472_SONET = 0x05,
+ SFP_ENCODING_8472_64B66B = 0x06,
+ SFP_ENCODING_256B257B = 0x07,
+ SFP_ENCODING_PAM4 = 0x08,
+ SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
+ SFP_OPTIONS_PAGING_A2 = BIT(12),
+ SFP_OPTIONS_RETIMER = BIT(11),
+ SFP_OPTIONS_COOLED_XCVR = BIT(10),
+ SFP_OPTIONS_POWER_DECL = BIT(9),
+ SFP_OPTIONS_RX_LINEAR_OUT = BIT(8),
+ SFP_OPTIONS_RX_DECISION_THRESH = BIT(7),
+ SFP_OPTIONS_TUNABLE_TX = BIT(6),
+ SFP_OPTIONS_RATE_SELECT = BIT(5),
+ SFP_OPTIONS_TX_DISABLE = BIT(4),
+ SFP_OPTIONS_TX_FAULT = BIT(3),
+ SFP_OPTIONS_LOS_INVERTED = BIT(2),
+ SFP_OPTIONS_LOS_NORMAL = BIT(1),
+ SFP_DIAGMON_DDM = BIT(6),
+ SFP_DIAGMON_INT_CAL = BIT(5),
+ SFP_DIAGMON_EXT_CAL = BIT(4),
+ SFP_DIAGMON_RXPWR_AVG = BIT(3),
+ SFP_DIAGMON_ADDRMODE = BIT(2),
+ SFP_ENHOPTS_ALARMWARN = BIT(7),
+ SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6),
+ SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5),
+ SFP_ENHOPTS_SOFT_RX_LOS = BIT(4),
+ SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3),
+ SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2),
+ SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1),
+ SFP_SFF8472_COMPLIANCE_NONE = 0x00,
+ SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01,
+ SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02,
+ SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03,
+ SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04,
+ SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05,
+ SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06,
+ SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07,
+ SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08,
+};
+
+/* SFP Diagnostics */
+enum {
+ /* Alarm and warnings stored MSB at lower address then LSB */
+ SFP_TEMP_HIGH_ALARM = 0x00,
+ SFP_TEMP_LOW_ALARM = 0x02,
+ SFP_TEMP_HIGH_WARN = 0x04,
+ SFP_TEMP_LOW_WARN = 0x06,
+ SFP_VOLT_HIGH_ALARM = 0x08,
+ SFP_VOLT_LOW_ALARM = 0x0a,
+ SFP_VOLT_HIGH_WARN = 0x0c,
+ SFP_VOLT_LOW_WARN = 0x0e,
+ SFP_BIAS_HIGH_ALARM = 0x10,
+ SFP_BIAS_LOW_ALARM = 0x12,
+ SFP_BIAS_HIGH_WARN = 0x14,
+ SFP_BIAS_LOW_WARN = 0x16,
+ SFP_TXPWR_HIGH_ALARM = 0x18,
+ SFP_TXPWR_LOW_ALARM = 0x1a,
+ SFP_TXPWR_HIGH_WARN = 0x1c,
+ SFP_TXPWR_LOW_WARN = 0x1e,
+ SFP_RXPWR_HIGH_ALARM = 0x20,
+ SFP_RXPWR_LOW_ALARM = 0x22,
+ SFP_RXPWR_HIGH_WARN = 0x24,
+ SFP_RXPWR_LOW_WARN = 0x26,
+ SFP_LASER_TEMP_HIGH_ALARM = 0x28,
+ SFP_LASER_TEMP_LOW_ALARM = 0x2a,
+ SFP_LASER_TEMP_HIGH_WARN = 0x2c,
+ SFP_LASER_TEMP_LOW_WARN = 0x2e,
+ SFP_TEC_CUR_HIGH_ALARM = 0x30,
+ SFP_TEC_CUR_LOW_ALARM = 0x32,
+ SFP_TEC_CUR_HIGH_WARN = 0x34,
+ SFP_TEC_CUR_LOW_WARN = 0x36,
+ SFP_CAL_RXPWR4 = 0x38,
+ SFP_CAL_RXPWR3 = 0x3c,
+ SFP_CAL_RXPWR2 = 0x40,
+ SFP_CAL_RXPWR1 = 0x44,
+ SFP_CAL_RXPWR0 = 0x48,
+ SFP_CAL_TXI_SLOPE = 0x4c,
+ SFP_CAL_TXI_OFFSET = 0x4e,
+ SFP_CAL_TXPWR_SLOPE = 0x50,
+ SFP_CAL_TXPWR_OFFSET = 0x52,
+ SFP_CAL_T_SLOPE = 0x54,
+ SFP_CAL_T_OFFSET = 0x56,
+ SFP_CAL_V_SLOPE = 0x58,
+ SFP_CAL_V_OFFSET = 0x5a,
+ SFP_CHKSUM = 0x5f,
+
+ SFP_TEMP = 0x60,
+ SFP_VCC = 0x62,
+ SFP_TX_BIAS = 0x64,
+ SFP_TX_POWER = 0x66,
+ SFP_RX_POWER = 0x68,
+ SFP_LASER_TEMP = 0x6a,
+ SFP_TEC_CUR = 0x6c,
+
+ SFP_STATUS = 0x6e,
+ SFP_ALARM = 0x70,
+
+ SFP_EXT_STATUS = 0x76,
+ SFP_VSL = 0x78,
+ SFP_PAGE = 0x7f,
+};
+
+struct device_node;
+struct ethtool_eeprom;
+struct ethtool_modinfo;
+struct net_device;
+struct sfp_bus;
+
+struct sfp_upstream_ops {
+ int (*module_insert)(void *, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *);
+ void (*link_down)(void *);
+ void (*link_up)(void *);
+ int (*connect_phy)(void *, struct phy_device *);
+ void (*disconnect_phy)(void *);
+};
+
+#if IS_ENABLED(CONFIG_SFP)
+int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id);
+void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+
+int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
+int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
+ u8 *data);
+void sfp_upstream_start(struct sfp_bus *bus);
+void sfp_upstream_stop(struct sfp_bus *bus);
+struct sfp_bus *sfp_register_upstream(struct device_node *np,
+ struct net_device *ndev, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_unregister_upstream(struct sfp_bus *bus);
+#else
+static inline int sfp_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
+{
+ return PORT_OTHER;
+}
+
+static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
+{
+ return PHY_INTERFACE_MODE_NA;
+}
+
+static inline void sfp_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
+{
+}
+
+static inline int sfp_get_module_info(struct sfp_bus *bus,
+ struct ethtool_modinfo *modinfo)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sfp_get_module_eeprom(struct sfp_bus *bus,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sfp_upstream_start(struct sfp_bus *bus)
+{
+}
+
+static inline void sfp_upstream_stop(struct sfp_bus *bus)
+{
+}
+
+static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+ struct net_device *ndev, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ return (struct sfp_bus *)-1;
+}
+
+static inline void sfp_unregister_upstream(struct sfp_bus *bus)
+{
+}
+#endif
+
+#endif
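A hedged sketch of how a MAC driver might hook into the SFP bus with the upstream ops above; the example_mac structure, the device-tree node handling and the error codes are hypothetical:

#include <linux/errno.h>
#include <linux/sfp.h>

struct example_mac {
        struct net_device *ndev;
        struct sfp_bus *sfp_bus;
};

static int example_module_insert(void *priv, const struct sfp_eeprom_id *id)
{
        /* inspect id->base / id->ext and program the MAC-side interface mode */
        return 0;
}

static void example_link_up(void *priv)
{
        /* module reports signal: start the MAC */
}

static void example_link_down(void *priv)
{
        /* loss of signal: stop the MAC */
}

static const struct sfp_upstream_ops example_sfp_ops = {
        .module_insert  = example_module_insert,
        .link_up        = example_link_up,
        .link_down      = example_link_down,
};

static int example_attach_sfp(struct example_mac *mac, struct device_node *sfp_np)
{
        mac->sfp_bus = sfp_register_upstream(sfp_np, mac->ndev, mac,
                                             &example_sfp_ops);
        if (!mac->sfp_bus)
                return -ENODEV;

        sfp_upstream_start(mac->sfp_bus);
        return 0;
}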
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 04e881829625..21a5e6c43385 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -22,28 +22,11 @@ struct shmid_kernel /* private to the kernel */
/* The task created the shm object. NULL if the task is dead. */
struct task_struct *shm_creator;
struct list_head shm_clist; /* list by creator */
-};
+} __randomize_layout;
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
#define SHM_LOCKED 02000 /* segment will not be swapped */
-#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
-#define SHM_NORESERVE 010000 /* don't check for reservations */
-
-/* Bits [26:31] are reserved */
-
-/*
- * When SHM_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define SHM_HUGE_SHIFT 26
-#define SHM_HUGE_MASK 0x3f
-#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
-#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a7d6bd2a918f..b6c3540e07bc 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -137,9 +137,15 @@ extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep);
+extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
+ pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr);
#else
#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
+#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
+ dst_addr) ({ BUG(); 0; })
#endif
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 4fcacd915d45..51d189615bda 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,6 +18,13 @@ struct shrink_control {
*/
unsigned long nr_to_scan;
+ /*
+ * How many objects did scan_objects process?
+ * This defaults to nr_to_scan before every call, but the callee
+ * should track its actual progress.
+ */
+ unsigned long nr_scanned;
+
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 35226cd4efb0..8621ffdeecbf 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -193,7 +193,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
}
static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, int size, gfp_t gfp)
+ int nrings, unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbe29b6c9bd6..f751f3b93039 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
+#include <linux/refcount.h>
#include <linux/atomic.h>
#include <asm/types.h>
@@ -345,6 +346,42 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
frag->size -= delta;
}
+static inline bool skb_frag_must_loop(struct page *p)
+{
+#if defined(CONFIG_HIGHMEM)
+ if (PageHighMem(p))
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * skb_frag_foreach_page - loop over pages in a fragment
+ *
+ * @f: skb frag to operate on
+ * @f_off: offset from start of f->page.p
+ * @f_len: length from f_off to loop over
+ * @p: (temp var) current page
+ * @p_off: (temp var) offset from start of current page,
+ * non-zero only on first page.
+ * @p_len: (temp var) length in current page,
+ * < PAGE_SIZE only on first and last page.
+ * @copied: (temp var) length so far, excluding current p_len.
+ *
+ * A fragment can hold a compound page, in which case per-page
+ * operations, notably kmap_atomic, must be called for each
+ * regular page.
+ */
+#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
+ for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
+ p_off = (f_off) & (PAGE_SIZE - 1), \
+ p_len = skb_frag_must_loop(p) ? \
+ min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
+ copied = 0; \
+ copied < f_len; \
+ copied += p_len, p++, p_off = 0, \
+ p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
+
#define HAVE_HW_TIME_STAMP
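A hedged sketch of the intended use of skb_frag_foreach_page() above: mapping each constituent page of a (possibly compound, possibly highmem) frag in turn. Here f_off is measured from the start of frag->page.p, i.e. it already includes the frag's page_offset; example_copy_from_frag() itself is illustrative:

#include <linux/highmem.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_copy_from_frag(const skb_frag_t *frag, u32 f_off,
                                   u32 f_len, u8 *to)
{
        struct page *p;
        u32 p_off, p_len, copied;

        skb_frag_foreach_page(frag, f_off, f_len, p, p_off, p_len, copied) {
                void *vaddr = kmap_atomic(p);   /* per regular page, not per frag */

                memcpy(to + copied, vaddr + p_off, p_len);
                kunmap_atomic(vaddr);
        }
}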
/**
@@ -393,6 +430,7 @@ enum {
SKBTX_SCHED_TSTAMP = 1 << 6,
};
+#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
@@ -407,10 +445,46 @@ enum {
*/
struct ubuf_info {
void (*callback)(struct ubuf_info *, bool zerocopy_success);
- void *ctx;
- unsigned long desc;
+ union {
+ struct {
+ unsigned long desc;
+ void *ctx;
+ };
+ struct {
+ u32 id;
+ u16 len;
+ u16 zerocopy:1;
+ u32 bytelen;
+ };
+ };
+ refcount_t refcnt;
+
+ struct mmpin {
+ struct user_struct *user;
+ unsigned int num_pg;
+ } mmp;
};
+#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+
+struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
+
+static inline void sock_zerocopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+void sock_zerocopy_put(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+
+void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg);
+
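A heavily simplified sketch of the transmit-side flow these hooks enable (the real MSG_ZEROCOPY plumbing lives in the protocol sendmsg paths; the example_* function and its error handling are illustrative only):

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_sendmsg_zerocopy(struct sock *sk, struct sk_buff *skb,
                                    struct msghdr *msg, int len)
{
        struct ubuf_info *uarg;
        int copied;

        /* account the pinned user memory and get a completion handle */
        uarg = sock_zerocopy_alloc(sk, len);
        if (!uarg)
                return -ENOBUFS;

        /* attach user pages as frags and tie the skb to uarg */
        copied = skb_zerocopy_iter_stream(sk, skb, msg, len, uarg);
        if (copied < 0) {
                sock_zerocopy_put_abort(uarg);  /* also reverts the zerocopy id */
                return copied;
        }

        /* on completion sock_zerocopy_callback() notifies; final put frees uarg */
        return copied;
}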
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -463,39 +537,38 @@ enum {
enum {
SKB_GSO_TCPV4 = 1 << 0,
- SKB_GSO_UDP = 1 << 1,
/* This indicates the skb is from an untrusted source. */
- SKB_GSO_DODGY = 1 << 2,
+ SKB_GSO_DODGY = 1 << 1,
/* This indicates the tcp segment has CWR set. */
- SKB_GSO_TCP_ECN = 1 << 3,
+ SKB_GSO_TCP_ECN = 1 << 2,
- SKB_GSO_TCP_FIXEDID = 1 << 4,
+ SKB_GSO_TCP_FIXEDID = 1 << 3,
- SKB_GSO_TCPV6 = 1 << 5,
+ SKB_GSO_TCPV6 = 1 << 4,
- SKB_GSO_FCOE = 1 << 6,
+ SKB_GSO_FCOE = 1 << 5,
- SKB_GSO_GRE = 1 << 7,
+ SKB_GSO_GRE = 1 << 6,
- SKB_GSO_GRE_CSUM = 1 << 8,
+ SKB_GSO_GRE_CSUM = 1 << 7,
- SKB_GSO_IPXIP4 = 1 << 9,
+ SKB_GSO_IPXIP4 = 1 << 8,
- SKB_GSO_IPXIP6 = 1 << 10,
+ SKB_GSO_IPXIP6 = 1 << 9,
- SKB_GSO_UDP_TUNNEL = 1 << 11,
+ SKB_GSO_UDP_TUNNEL = 1 << 10,
- SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
- SKB_GSO_PARTIAL = 1 << 13,
+ SKB_GSO_PARTIAL = 1 << 12,
- SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
- SKB_GSO_SCTP = 1 << 15,
+ SKB_GSO_SCTP = 1 << 14,
- SKB_GSO_ESP = 1 << 16,
+ SKB_GSO_ESP = 1 << 15,
};
#if BITS_PER_LONG > 32
@@ -945,12 +1018,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
-struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
-static inline struct sk_buff *alloc_skb_head(gfp_t priority)
-{
- return __alloc_skb_head(priority, -1);
-}
-
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
@@ -973,7 +1040,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
-int skb_pad(struct sk_buff *skb, int pad);
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
+
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ * May return error in out of memory cases. The skb is freed on error.
+ */
+static inline int skb_pad(struct sk_buff *skb, int pad)
+{
+ return __skb_pad(skb, pad, true);
+}
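The free_on_error flag exists for callers that still own the skb on failure and must not see it freed underneath them (typically driver xmit paths); __skb_put_padto() further below wraps exactly this pad-then-put pattern. A hedged sketch using the usual Ethernet minimum frame length (illustrative only):

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int example_pad_runt(struct sk_buff *skb)
{
        unsigned int pad;

        if (skb->len >= ETH_ZLEN)
                return 0;

        pad = ETH_ZLEN - skb->len;
        if (__skb_pad(skb, pad, false)) /* keep ownership even on failure */
                return -ENOMEM;

        skb_put(skb, pad);              /* padding bytes were already zeroed */
        return 0;
}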
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -1129,8 +1212,6 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
-
static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
if (!skb->l4_hash && !skb->sw_hash) {
@@ -1143,20 +1224,6 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
return skb->hash;
}
-__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
-
-static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
-{
- if (!skb->l4_hash && !skb->sw_hash) {
- struct flow_keys keys;
- __u32 hash = __get_hash_from_flowi4(fl4, &keys);
-
- __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
- }
-
- return skb->hash;
-}
-
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
@@ -1201,6 +1268,50 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
return &skb_shinfo(skb)->hwtstamps;
}
+static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
+{
+ bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+
+ return is_zcopy ? skb_uarg(skb) : NULL;
+}
+
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ if (skb && uarg && !skb_zcopy(skb)) {
+ sock_zerocopy_get(uarg);
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+{
+ struct ubuf_info *uarg = skb_zcopy(skb);
+
+ if (uarg) {
+ if (uarg->callback == sock_zerocopy_callback) {
+ uarg->zerocopy = uarg->zerocopy && zerocopy;
+ sock_zerocopy_put(uarg);
+ } else {
+ uarg->callback(uarg, zerocopy);
+ }
+
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
+/* Abort a zerocopy operation and revert zckey on error in the send syscall */
+static inline void skb_zcopy_abort(struct sk_buff *skb)
+{
+ struct ubuf_info *uarg = skb_zcopy(skb);
+
+ if (uarg) {
+ sock_zerocopy_put_abort(uarg);
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
@@ -1783,13 +1894,18 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
-static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
- return len + skb_headlen(skb);
+ return len;
+}
+
+static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+{
+ return skb_headlen(skb) + __skb_pagelen(skb);
}
/**
@@ -2434,7 +2550,17 @@ static inline void skb_orphan(struct sk_buff *skb)
*/
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
- if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
+ if (likely(!skb_zcopy(skb)))
+ return 0;
+ if (skb_uarg(skb)->callback == sock_zerocopy_callback)
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+}
+
+/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
+static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ if (likely(!skb_zcopy(skb)))
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
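A hedged example of the new _rx variant in a hypothetical transmit path that may loop the packet back into local receive; the return value and surrounding flow are illustrative only:

	/* User frags must always be orphaned here, even when refcounted,
	 * because the skb can end up on the local RX path. */
	if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}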
@@ -2825,25 +2951,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
+ * @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
- * success. The skb is freed on error.
+ * success. The skb is freed on error if @free_on_error is true.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
- if (skb_pad(skb, len))
+ if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ return __skb_put_padto(skb, len, true);
+}
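A sketch of a transmit handler that prefers to keep the skb when padding fails; returning NETDEV_TX_BUSY is an assumed policy, not taken from this patch:

	/* Pad to the minimum Ethernet frame length without consuming the skb
	 * on -ENOMEM, so the stack can requeue it. */
	if (__skb_put_padto(skb, ETH_ZLEN, false))
		return NETDEV_TX_BUSY;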
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
@@ -2866,6 +3009,8 @@ static inline int skb_add_data(struct sk_buff *skb,
static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
const struct page *page, int off)
{
+ if (skb_zcopy(skb))
+ return false;
if (i) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -3120,6 +3265,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
+int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
+ int len);
+int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0faf3a90be..0783b622311e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,6 +115,10 @@ struct kmem_cache {
#endif
#endif
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ unsigned long random;
+#endif
+
#ifdef CONFIG_NUMA
/*
* Defragmentation by allocating from a remote node.
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 68123c1fe549..98b1fe027fc9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -14,13 +14,17 @@
#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);
-struct call_single_data {
+struct __call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
unsigned int flags;
};
+/* Use __aligned() to avoid using 2 cache lines for 1 csd */
+typedef struct __call_single_data call_single_data_t
+ __aligned(sizeof(struct __call_single_data));
+
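A minimal sketch of declaring a per-CPU CSD with the new aligned typedef and firing it asynchronously; my_csd and my_ipi_func are hypothetical names (assumes <linux/smp.h> and <linux/percpu.h>):

	static void my_ipi_func(void *info)
	{
		/* runs on the target CPU in IPI context */
	}

	static DEFINE_PER_CPU(call_single_data_t, my_csd);

	static void kick_remote_cpu(int cpu)
	{
		call_single_data_t *csd = &per_cpu(my_csd, cpu);

		csd->func = my_ipi_func;
		csd->info = NULL;
		smp_call_function_single_async(cpu, csd);
	}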
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
@@ -48,7 +52,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
-int smp_call_function_single_async(int cpu, struct call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
#ifdef CONFIG_SMP
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 2b7882666ef6..66693bc4c6ad 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -17,6 +17,8 @@
#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+#include <linux/dmaengine.h>
+
/*
* PKTDMA descriptor manipulation macros for host packet descriptor
*/
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8b13db5163cc..8ad963cdc88c 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -287,6 +287,7 @@ struct ucred {
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
+#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..69e079c5ff98 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -118,24 +118,41 @@ do { \
#endif
/*
- * Despite its name it doesn't necessarily has to be a full barrier.
- * It should only guarantee that a STORE before the critical section
- * can not be reordered with LOADs and STOREs inside this section.
- * spin_lock() is the one-way barrier, this LOAD can not escape out
- * of the region. So the default implementation simply ensures that
- * a STORE can not move into the critical section, smp_wmb() should
- * serialize it with another STORE done by spin_lock().
+ * This barrier must provide two things:
+ *
+ * - it must guarantee a STORE before the spin_lock() is ordered against a
+ * LOAD after it, see the comments at its two usage sites.
+ *
+ * - it must ensure the critical section is RCsc.
+ *
+ * The latter is important for cases where we observe values written by other
+ * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * for (;;) {
+ * if (READ_ONCE(X))
+ * break;
+ * }
+ * X=1
+ * <sched-out>
+ * <sched-in>
+ * r = X;
+ *
+ * without transitivity it could be that CPU1 observes X!=0 and breaks the loop,
+ * then we get migrated and CPU2 sees X==0.
+ *
+ * Since most load-store architectures implement ACQUIRE with an smp_mb() after
+ * the LL/SC loop, they need no further barriers. Similarly all our TSO
+ * architectures imply an smp_mb() for each atomic instruction and equally don't
+ * need more.
+ *
+ * Architectures that can implement ACQUIRE better need to take care.
*/
-#ifndef smp_mb__before_spinlock
-#define smp_mb__before_spinlock() smp_wmb()
+#ifndef smp_mb__after_spinlock
+#define smp_mb__after_spinlock() do { } while (0)
#endif
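A hedged sketch of the ordering this barrier is meant to provide on a single CPU; X, Y, lock and r are illustrative, and the real users are the scheduler paths referenced above:

	/* Illustrative only: the STORE to X issued before raw_spin_lock()
	 * must not be reordered past the LOAD of Y that follows
	 * smp_mb__after_spinlock(). */
	WRITE_ONCE(X, 1);
	raw_spin_lock(&lock);
	smp_mb__after_spinlock();
	r = READ_ONCE(Y);
	raw_spin_unlock(&lock);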
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
@@ -369,31 +386,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock(). However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- * any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- * any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
- raw_spin_unlock_wait(&lock->rlock);
-}
-
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0d9848de677d..612fb530af41 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,11 +26,6 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, VAL);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
@@ -73,7 +68,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
-#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index cfbfc540cafc..261471f407a5 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -87,4 +87,17 @@ static inline void srcu_barrier(struct srcu_struct *sp)
synchronize_srcu(sp);
}
+/* Defined here to avoid size increase for non-torture kernels. */
+static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+ char *tt, char *tf)
+{
+ int idx;
+
+ idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ tt, tf, idx,
+ READ_ONCE(sp->srcu_lock_nesting[!idx]),
+ READ_ONCE(sp->srcu_lock_nesting[idx]));
+}
+
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 42973f787e7e..a949f4f9e4d7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -104,8 +104,6 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-void process_srcu(struct work_struct *work);
-
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
@@ -141,5 +139,6 @@ void process_srcu(struct work_struct *work);
void synchronize_srcu_expedited(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);
+void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d10b7980799d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -33,10 +33,10 @@ static inline void pm_restore_console(void)
typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
-#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
+#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
-#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
+#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
enum suspend_stat_step {
@@ -186,7 +186,7 @@ struct platform_suspend_ops {
void (*recover)(void);
};
-struct platform_freeze_ops {
+struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
void (*wake)(void);
@@ -196,6 +196,9 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t mem_sleep_current;
+extern suspend_state_t mem_sleep_default;
+
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
@@ -234,22 +237,22 @@ static inline bool pm_resume_via_firmware(void)
}
/* Suspend-to-idle state machine. */
-enum freeze_state {
- FREEZE_STATE_NONE, /* Not suspended/suspending. */
- FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
- FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
+enum s2idle_states {
+ S2IDLE_STATE_NONE, /* Not suspended/suspending. */
+ S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */
+ S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */
};
-extern enum freeze_state __read_mostly suspend_freeze_state;
+extern enum s2idle_states __read_mostly s2idle_state;
-static inline bool idle_should_freeze(void)
+static inline bool idle_should_enter_s2idle(void)
{
- return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+ return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
extern void __init pm_states_init(void);
-extern void freeze_set_ops(const struct platform_freeze_ops *ops);
-extern void freeze_wake(void);
+extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
+extern void s2idle_wake(void);
/**
* arch_suspend_disable_irqs - disable IRQs for suspend
@@ -281,10 +284,10 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
-static inline bool idle_should_freeze(void) { return false; }
+static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
-static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
-static inline void freeze_wake(void) {}
+static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
+static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
@@ -427,6 +430,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern unsigned int pm_wakeup_irq;
+extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
@@ -491,10 +495,24 @@ static inline void unlock_system_sleep(void) {}
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
+extern bool pm_debug_messages_on;
+extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled (false)
+#define pm_debug_messages_on (false)
+
+#include <linux/printk.h>
+
+#define __pm_pr_dbg(defer, fmt, ...) \
+ no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
+#define pm_pr_dbg(fmt, ...) \
+ __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
+
+#define pm_deferred_pr_dbg(fmt, ...) \
+ __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
+
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c1f9c62a8a50..4a4e180d0a35 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -169,4 +169,59 @@ do { \
__ret; \
})
+#define __swait_event_idle(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
+
+/**
+ * swait_event_idle - wait without system load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ */
+#define swait_event_idle(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event_idle(wq, condition); \
+} while (0)
+
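A hypothetical kthread using the new helper so its sleep does not count toward load average; my_swq, my_work_pending and the waker comment are illustrative, not from this patch (assumes <linux/swait.h> and <linux/kthread.h>):

	static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
	static bool my_work_pending;

	static int my_kthread_fn(void *unused)
	{
		while (!kthread_should_stop()) {
			/* a waker sets my_work_pending and calls swake_up(&my_swq) */
			swait_event_idle(my_swq, READ_ONCE(my_work_pending) ||
						 kthread_should_stop());
			WRITE_ONCE(my_work_pending, false);
			/* ... process the work ... */
		}
		return 0;
	}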
+#define __swait_event_idle_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_IDLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * swait_event_idle_timeout - wait up to timeout without load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout at which we'll give up in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define swait_event_idle_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_idle_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
#endif /* _LINUX_SWAIT_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d83d28e53e62..8bf3487fb204 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -188,6 +188,7 @@ struct swap_cluster_info {
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
/*
* We assign a cluster to each CPU, so each CPU can allocate swap entry from
@@ -211,7 +212,7 @@ struct swap_info_struct {
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
- struct plist_node avail_list; /* entry in swap_avail_head */
+ struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
@@ -250,6 +251,25 @@ struct swap_info_struct {
struct swap_cluster_list discard_clusters; /* discard clusters list */
};
+#ifdef CONFIG_64BIT
+#define SWAP_RA_ORDER_CEILING 5
+#else
+/* Avoid stack overflow, because we need to save part of the page table */
+#define SWAP_RA_ORDER_CEILING 3
+#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
+#endif
+
+struct vma_swap_readahead {
+ unsigned short win;
+ unsigned short offset;
+ unsigned short nr_pte;
+#ifdef CONFIG_64BIT
+ pte_t *ptes;
+#else
+ pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
+#endif
+};
+
/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
@@ -262,8 +282,8 @@ extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);
-/* Definition of global_page_state not available yet */
-#define nr_free_pages() global_page_state(NR_FREE_PAGES)
+/* Definition of global_zone_page_state not available yet */
+#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
@@ -349,6 +369,7 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
+extern bool swap_vma_readahead;
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -361,7 +382,9 @@ extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t);
+extern struct page *lookup_swap_cache(swp_entry_t entry,
+ struct vm_area_struct *vma,
+ unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
bool do_poll);
@@ -371,11 +394,23 @@ extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
+extern struct page *swap_readahead_detect(struct vm_fault *vmf,
+ struct vma_swap_readahead *swap_ra);
+extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+ struct vm_fault *vmf,
+ struct vma_swap_readahead *swap_ra);
+
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
+extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
+static inline bool swap_use_vma_readahead(void)
+{
+ return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
+}
+
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
@@ -465,12 +500,32 @@ static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
return NULL;
}
+static inline bool swap_use_vma_readahead(void)
+{
+ return false;
+}
+
+static inline struct page *swap_readahead_detect(
+ struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+{
+ return NULL;
+}
+
+static inline struct page *do_swap_page_readahead(
+ swp_entry_t fentry, gfp_t gfp_mask,
+ struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+{
+ return NULL;
+}
+
static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
return 0;
}
-static inline struct page *lookup_swap_cache(swp_entry_t swp)
+static inline struct page *lookup_swap_cache(swp_entry_t swp,
+ struct vm_area_struct *vma,
+ unsigned long addr)
{
return NULL;
}
@@ -509,8 +564,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_mapcount) \
- (page_trans_huge_mapcount(page, total_mapcount) == 1)
+#define reuse_swap_page(page, total_map_swapcount) \
+ (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
static inline int try_to_free_swap(struct page *page)
{
@@ -526,6 +581,15 @@ static inline swp_entry_t get_swap_page(struct page *page)
#endif /* CONFIG_SWAP */
+#ifdef CONFIG_THP_SWAP
+extern int split_swap_cluster(swp_entry_t entry);
+#else
+static inline int split_swap_cluster(swp_entry_t entry)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 4ee479f2f355..15e7160751a8 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -35,6 +35,7 @@ int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
+extern void __init swiotlb_update_mem_attributes(void);
/*
* Enumeration for sync targets
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 5726107963b2..0ad87c434ae6 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,12 +43,13 @@ struct sync_file {
#endif
wait_queue_head_t wq;
+ unsigned long flags;
struct dma_fence *fence;
struct dma_fence_cb cb;
};
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3cb15ea48aee..88951b795ee3 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -100,11 +100,12 @@ union bpf_attr;
#define __MAP(n,...) __MAP##n(__VA_ARGS__)
#define __SC_DECL(t, a) t a
-#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
-#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+#define __TYPE_AS(t, v) __same_type((__force t)0, v)
+#define __TYPE_IS_L(t) (__TYPE_AS(t, 0L))
+#define __TYPE_IS_UL(t) (__TYPE_AS(t, 0UL))
+#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
-#define __SC_CAST(t, a) (t) a
+#define __SC_CAST(t, a) (__force t) a
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -172,8 +173,20 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__attribute__((section("__syscalls_metadata"))) \
*__p_syscall_meta_##sname = &__syscall_meta_##sname;
+
+static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+{
+ return tp_event->class == &event_class_syscall_enter ||
+ tp_event->class == &event_class_syscall_exit;
+}
+
#else
#define SYSCALL_METADATA(sname, nb, ...)
+
+static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+{
+ return 0;
+}
#endif
#define SYSCALL_DEFINE0(sname) \
@@ -206,6 +219,22 @@ extern struct trace_event_functions exit_syscall_print_funcs;
} \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#ifdef TIF_FSCHECK
+/*
+ * Called before returning to user mode. Returning to user mode with an
+ * address limit different from USER_DS can allow kernel memory to be overwritten.
+ */
+static inline void addr_limit_user_check(void)
+{
+ if (!test_thread_flag(TIF_FSCHECK))
+ return;
+
+ BUG_ON(!segment_eq(get_fs(), USER_DS));
+ clear_thread_flag(TIF_FSCHECK);
+}
+#endif
+
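A hedged sketch of an architecture-side call site; the hook name is hypothetical and the real placement is the arch's return-to-user work loop:

	/* Illustrative arch hook, assuming <linux/syscalls.h> is included and
	 * TIF_FSCHECK is defined for this architecture. */
	static inline void arch_exit_to_user_mode_check(void)
	{
		/* last check before dropping back to user space */
		addr_limit_user_check();
	}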
asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
asmlinkage long sys_time(time_t __user *tloc);
@@ -578,12 +607,12 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 3a89b9ff4cdc..1d4dba490fb6 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -120,7 +120,7 @@ struct ctl_table
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
-};
+} __randomize_layout;
struct ctl_node {
struct rb_node node;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 542ca1ae02c4..4aa40ef02d32 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -192,15 +192,6 @@ struct tcp_sock {
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- /* Data for direct copy to user */
- struct {
- struct sk_buff_head prequeue;
- struct task_struct *task;
- struct msghdr *msg;
- int memory;
- int len;
- } ucopy;
-
u32 snd_wl1; /* Sequence for window update */
u32 snd_wnd; /* The window we expect to receive */
u32 max_window; /* Maximal window ever seen from peer */
@@ -273,7 +264,7 @@ struct tcp_sock {
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
- u32 prior_cwnd; /* Congestion window at start of Recovery. */
+ u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 250a27614328..905d769d8ddc 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -38,6 +38,10 @@ enum {
#ifdef __KERNEL__
+#ifndef THREAD_ALIGN
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
__GFP_ZERO)
diff --git a/include/linux/time.h b/include/linux/time.h
index 4abb32d4c6b8..3877136bbdf8 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -285,4 +285,19 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
return true;
}
+/**
+ * time_after32 - compare two 32-bit relative times
+ * @a: the time which may be after @b
+ * @b: the time which may be before @a
+ *
+ * time_after32(a, b) returns true if the time @a is after time @b.
+ * time_before32(b, a) returns true if the time @b is before time @a.
+ *
+ * Similar to time_after(), compare two 32-bit timestamps for relative
+ * times. This is useful for comparing 32-bit seconds values that can't
+ * be converted to 64-bit values (e.g. due to disk format or wire protocol
+ * issues) when it is known that the times are less than 68 years apart.
+ */
+#define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
+#define time_before32(b, a) time_after32(a, b)
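A small hypothetical example: checking a 32-bit on-disk expiry stamp, assuming both values come from the same seconds clock and are less than 68 years apart (needs <linux/kernel.h> and <linux/timekeeping.h>):

	static bool lease_expired(u32 expiry32)
	{
		u32 now = lower_32_bits(ktime_get_real_seconds());

		return time_after32(now, expiry32);
	}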
#endif
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
new file mode 100644
index 000000000000..0d2d3da46139
--- /dev/null
+++ b/include/linux/tnum.h
@@ -0,0 +1,81 @@
+/* tnum: tracked (or tristate) numbers
+ *
+ * A tnum tracks knowledge about the bits of a value. Each bit can be either
+ * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
+ * propagate the unknown bits such that the tnum result represents all the
+ * possible results for possible values of the operands.
+ */
+#include <linux/types.h>
+
+struct tnum {
+ u64 value;
+ u64 mask;
+};
+
+/* Constructors */
+/* Represent a known constant as a tnum. */
+struct tnum tnum_const(u64 value);
+/* A completely unknown value */
+extern const struct tnum tnum_unknown;
+/* A value that's unknown except that @min <= value <= @max */
+struct tnum tnum_range(u64 min, u64 max);
+
+/* Arithmetic and logical ops */
+/* Shift a tnum left (by a fixed shift) */
+struct tnum tnum_lshift(struct tnum a, u8 shift);
+/* Shift a tnum right (by a fixed shift) */
+struct tnum tnum_rshift(struct tnum a, u8 shift);
+/* Add two tnums, return @a + @b */
+struct tnum tnum_add(struct tnum a, struct tnum b);
+/* Subtract two tnums, return @a - @b */
+struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Bitwise-AND, return @a & @b */
+struct tnum tnum_and(struct tnum a, struct tnum b);
+/* Bitwise-OR, return @a | @b */
+struct tnum tnum_or(struct tnum a, struct tnum b);
+/* Bitwise-XOR, return @a ^ @b */
+struct tnum tnum_xor(struct tnum a, struct tnum b);
+/* Multiply two tnums, return @a * @b */
+struct tnum tnum_mul(struct tnum a, struct tnum b);
+
+/* Return a tnum representing numbers satisfying both @a and @b */
+struct tnum tnum_intersect(struct tnum a, struct tnum b);
+
+/* Return @a with all but the lowest @size bytes cleared */
+struct tnum tnum_cast(struct tnum a, u8 size);
+
+/* Returns true if @a is a known constant */
+static inline bool tnum_is_const(struct tnum a)
+{
+ return !a.mask;
+}
+
+/* Returns true if @a == tnum_const(@b) */
+static inline bool tnum_equals_const(struct tnum a, u64 b)
+{
+ return tnum_is_const(a) && a.value == b;
+}
+
+/* Returns true if @a is completely unknown */
+static inline bool tnum_is_unknown(struct tnum a)
+{
+ return !~a.mask;
+}
+
+/* Returns true if @a is known to be a multiple of @size.
+ * @size must be a power of two.
+ */
+bool tnum_is_aligned(struct tnum a, u64 size);
+
+/* Returns true if @b represents a subset of @a. */
+bool tnum_in(struct tnum a, struct tnum b);
+
+/* Formatting functions. These have snprintf-like semantics: they will write
+ * up to @size bytes (including the terminating NUL byte), and return the number
+ * of bytes (excluding the terminating NUL) which would have been written had
+ * sufficient space been available. (Thus tnum_sbin always returns 64.)
+ */
+/* Format a tnum as a pair of hex numbers (value; mask) */
+int tnum_strn(char *str, size_t size, struct tnum a);
+/* Format a tnum as tristate binary expansion */
+int tnum_sbin(char *str, size_t size, struct tnum a);
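A brief sketch of how these primitives compose; the constants are made up purely for illustration:

	static bool tnum_demo(void)
	{
		struct tnum a = tnum_const(4);      /* exactly 4 */
		struct tnum b = tnum_range(0, 3);   /* low two bits unknown */
		struct tnum sum = tnum_add(a, b);   /* represents {4, 5, 6, 7} */

		/* a is a known constant, the sum is only partially known */
		return tnum_equals_const(a, 4) && !tnum_is_const(sum);
	}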
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index f73cedfa2e0b..5012b524283d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -338,7 +338,7 @@ enum {
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
- struct event_filter *filter;
+ struct event_filter __rcu *filter;
struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
@@ -508,9 +508,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
- struct task_struct *task)
+ struct task_struct *task, struct perf_event *event)
{
- perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
}
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 69464c0d8068..cf53eb539f6e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -261,6 +261,8 @@ struct tty_port {
*/
#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
+#define TTY_PORT_KOPENED 5 /* device exclusively opened by
+ kernel */
/*
* Where all of the state associated with a tty is kept while the tty
@@ -332,7 +334,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
-};
+} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
struct tty_file_private {
@@ -399,8 +401,8 @@ extern struct tty_struct *get_current_tty(void);
/* tty_io.c */
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
- struct file *filp);
+extern struct tty_struct *tty_kopen(dev_t device);
+extern void tty_kclose(struct tty_struct *tty);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
#else
static inline void tty_kref_put(struct tty_struct *tty)
@@ -422,9 +424,10 @@ static inline int __init tty_init(void)
{ return 0; }
static inline const char *tty_name(const struct tty_struct *tty)
{ return "(none)"; }
-static inline struct tty_struct *tty_open_by_driver(dev_t device,
- struct inode *inode, struct file *filp)
-{ return NULL; }
+static inline struct tty_struct *tty_kopen(dev_t device)
+{ return ERR_PTR(-ENODEV); }
+static inline void tty_kclose(struct tty_struct *tty)
+{ }
static inline int tty_dev_name_to_number(const char *name, dev_t *number)
{ return -ENOTSUPP; }
#endif
@@ -652,6 +655,19 @@ static inline void tty_port_set_initialized(struct tty_port *port, bool val)
clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
}
+static inline bool tty_port_kopened(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
+static inline void tty_port_set_kopened(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_KOPENED, &port->iflags);
+ else
+ clear_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
extern int tty_port_carrier_raised(struct tty_port *port);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index b742b5e47cc2..fcdc0f5d9098 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -243,6 +243,7 @@
#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/termios.h>
+#include <linux/seq_file.h>
struct tty_struct;
struct tty_driver;
@@ -285,13 +286,14 @@ struct tty_operations {
int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+ void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
const struct file_operations *proc_fops;
-};
+} __randomize_layout;
struct tty_driver {
int magic; /* magic number for this structure */
@@ -325,7 +327,7 @@ struct tty_driver {
const struct tty_operations *ops;
struct list_head tty_drivers;
-};
+} __randomize_layout;
extern struct list_head tty_drivers;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index c28dd523f96e..d43837f2ce3a 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index acdd6f915a8d..20ef8e6ec2db 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void *from, unsigned long n)
+copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index c5f2158ab00e..fd73bc0e9027 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -115,13 +115,13 @@ struct uac2_input_terminal_descriptor {
__u8 bDescriptorType;
__u8 bDescriptorSubtype;
__u8 bTerminalID;
- __u16 wTerminalType;
+ __le16 wTerminalType;
__u8 bAssocTerminal;
__u8 bCSourceID;
__u8 bNrChannels;
- __u32 bmChannelConfig;
+ __le32 bmChannelConfig;
__u8 iChannelNames;
- __u16 bmControls;
+ __le16 bmControls;
__u8 iTerminal;
} __attribute__((packed));
@@ -132,11 +132,11 @@ struct uac2_output_terminal_descriptor {
__u8 bDescriptorType;
__u8 bDescriptorSubtype;
__u8 bTerminalID;
- __u16 wTerminalType;
+ __le16 wTerminalType;
__u8 bAssocTerminal;
__u8 bSourceID;
__u8 bCSourceID;
- __u16 bmControls;
+ __le16 bmControls;
__u8 iTerminal;
} __attribute__((packed));
@@ -164,9 +164,9 @@ struct uac2_as_header_descriptor {
__u8 bTerminalLink;
__u8 bmControls;
__u8 bFormatType;
- __u32 bmFormats;
+ __le32 bmFormats;
__u8 bNrChannels;
- __u32 bmChannelConfig;
+ __le32 bmChannelConfig;
__u8 iChannelNames;
} __attribute__((packed));
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 021f7a88f52c..1a59699cf82a 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -83,6 +83,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index c5fdfcf99828..d725cff7268d 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -58,6 +58,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
+#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 1a4a4bacfae6..21468a722c4a 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -48,6 +48,7 @@ struct usb_ep;
* by adding a zero length packet as needed;
* @short_not_ok: When reading data, makes short packets be
* treated as errors (queue stops advancing till cleanup).
+ * @dma_mapped: Indicates if request has been mapped to DMA (internal)
* @complete: Function called when request completes, so this request and
* its buffer may be re-used. The function will always be called with
* interrupts disabled, and it must not sleep.
@@ -103,6 +104,7 @@ struct usb_request {
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
+ unsigned dma_mapped:1;
void (*complete)(struct usb_ep *ep,
struct usb_request *req);
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 299245105610..8c6914873a16 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -12,6 +12,7 @@
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/usb.h>
+#include <uapi/linux/usb/charger.h>
enum usb_phy_interface {
USBPHY_INTERFACE_MODE_UNKNOWN,
@@ -72,6 +73,17 @@ struct usb_phy_io_ops {
int (*write)(struct usb_phy *x, u32 val, u32 reg);
};
+struct usb_charger_current {
+ unsigned int sdp_min;
+ unsigned int sdp_max;
+ unsigned int dcp_min;
+ unsigned int dcp_max;
+ unsigned int cdp_min;
+ unsigned int cdp_max;
+ unsigned int aca_min;
+ unsigned int aca_max;
+};
+
struct usb_phy {
struct device *dev;
const char *label;
@@ -91,6 +103,13 @@ struct usb_phy {
struct extcon_dev *id_edev;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
+ struct notifier_block type_nb;
+
+ /* Support USB charger */
+ enum usb_charger_type chg_type;
+ enum usb_charger_state chg_state;
+ struct usb_charger_current chg_cur;
+ struct work_struct chg_work;
/* for notification of usb_phy_events */
struct atomic_notifier_head notifier;
@@ -129,6 +148,12 @@ struct usb_phy {
enum usb_device_speed speed);
int (*notify_disconnect)(struct usb_phy *x,
enum usb_device_speed speed);
+
+ /*
+ * Charger detection method can be implemented if you need to
+ * manually detect the charger type.
+ */
+ enum usb_charger_type (*charger_detect)(struct usb_phy *x);
};
/**
@@ -219,6 +244,12 @@ extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
const char *phy_dev_name);
extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
+extern void usb_phy_set_charger_current(struct usb_phy *usb_phy,
+ unsigned int mA);
+extern void usb_phy_get_charger_current(struct usb_phy *usb_phy,
+ unsigned int *min, unsigned int *max);
+extern void usb_phy_set_charger_state(struct usb_phy *usb_phy,
+ enum usb_charger_state state);
#else
static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
@@ -270,12 +301,33 @@ static inline int usb_bind_phy(const char *dev_name, u8 index,
static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
}
+
+static inline void usb_phy_set_charger_current(struct usb_phy *usb_phy,
+ unsigned int mA)
+{
+}
+
+static inline void usb_phy_get_charger_current(struct usb_phy *usb_phy,
+ unsigned int *min,
+ unsigned int *max)
+{
+}
+
+static inline void usb_phy_set_charger_state(struct usb_phy *usb_phy,
+ enum usb_charger_state state)
+{
+}
#endif
static inline int
usb_phy_set_power(struct usb_phy *x, unsigned mA)
{
- if (x && x->set_power)
+ if (!x)
+ return 0;
+
+ usb_phy_set_charger_current(x, mA);
+
+ if (x->set_power)
return x->set_power(x, mA);
return 0;
}
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 32354b4b4b2b..b3575ce29148 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -66,7 +66,7 @@ struct user_namespace {
#endif
struct ucounts *ucounts;
int ucount_max[UCOUNT_COUNTS];
-};
+} __randomize_layout;
struct ucounts {
struct hlist_node node;
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 60f0bb83b313..da826ed059cf 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -26,7 +26,7 @@ struct uts_namespace {
struct user_namespace *user_ns;
struct ucounts *ucounts;
struct ns_common ns;
-};
+} __randomize_layout;
extern struct uts_namespace init_uts_ns;
#ifdef CONFIG_UTS_NS
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2251e1925ea4..33b0bdbb613c 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -84,26 +84,12 @@ int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
-typedef uuid_t uuid_be;
-#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
- UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
-#define NULL_UUID_BE \
- UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00)
-
#define uuid_le_gen(u) guid_gen(u)
-#define uuid_be_gen(u) uuid_gen(u)
#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
-#define uuid_be_to_bin(uuid, u) uuid_parse(uuid, u)
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
}
-static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
-{
- return memcmp(&u1, &u2, sizeof(uuid_t));
-}
-
#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 586809abb273..a47b985341d1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -152,7 +152,7 @@ extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
size_t *data_size);
struct pci_dev;
-#ifdef CONFIG_EEH
+#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
@@ -173,7 +173,7 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
{
return -ENOTTY;
}
-#endif /* CONFIG_EEH */
+#endif /* CONFIG_VFIO_SPAPR_EEH */
/*
* IRQfd - generic
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 5209b5ed2a64..32fb046f2173 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -18,9 +18,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
- case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
- break;
default:
return -EINVAL;
}
@@ -73,8 +70,6 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
return -EINVAL;
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 37e8d31a4632..d77bc35278b0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -85,6 +85,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#endif
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
+ THP_SWPOUT,
+ THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_MEMORY_BALLOON
BALLOON_INFLATE,
@@ -104,6 +106,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMACACHE_FIND_HITS,
VMACACHE_FULL_FLUSHES,
#endif
+#ifdef CONFIG_SWAP
+ SWAP_RA,
+ SWAP_RA_HIT,
+#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index b3d85f30d424..97e11ab573f0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -123,7 +123,7 @@ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
atomic_long_add(x, &vm_node_stat[item]);
}
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
@@ -199,7 +199,7 @@ extern unsigned long sum_zone_node_page_state(int node,
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
#else
-#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 90cbe7e65059..5b2972946dda 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -68,6 +68,7 @@ struct w1_reg_num {
* @family: module for device family type
* @family_data: pointer for use by the family module
* @dev: kernel device identifier
+ * @hwmon: pointer to hwmon device
*
*/
struct w1_slave {
@@ -83,6 +84,7 @@ struct w1_slave {
struct w1_family *family;
void *family_data;
struct device dev;
+ struct device *hwmon;
};
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
@@ -250,11 +252,13 @@ void w1_remove_master_device(struct w1_bus_master *master);
* @add_slave: add_slave
* @remove_slave: remove_slave
* @groups: sysfs group
+ * @chip_info: pointer to struct hwmon_chip_info
*/
struct w1_family_ops {
int (*add_slave)(struct w1_slave *sl);
void (*remove_slave)(struct w1_slave *sl);
const struct attribute_group **groups;
+ const struct hwmon_chip_info *chip_info;
};
/**
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b289c96151ee..dc19880c02f5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -529,13 +529,13 @@ do { \
/**
* wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq_head: the waitqueue to wait on
+ * @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, as a ktime_t
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq_head is woken up.
+ * The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -735,12 +735,12 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
/**
* wait_event_killable - sleep until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_killable_timeout(wq_head, condition, timeout) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), \
+ TASK_KILLABLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a kill signal is received.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a kill signal.
+ *
+ * Only kill signals interrupt this process.
+ */
+#define wait_event_killable_timeout(wq_head, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_killable_timeout(wq_head, \
+ condition, timeout); \
+ __ret; \
+})
+
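A hypothetical caller waiting for firmware readiness; fw_wq, fw_ready and the five second budget are illustrative only:

	static DECLARE_WAIT_QUEUE_HEAD(fw_wq);
	static bool fw_ready;

	/* returns 0 on success, -ETIMEDOUT or -ERESTARTSYS otherwise */
	static int wait_for_fw(void)
	{
		long ret;

		ret = wait_event_killable_timeout(fw_wq, READ_ONCE(fw_ready),
						  5 * HZ);
		if (!ret)
			return -ETIMEDOUT;	/* timed out, still not ready */
		if (ret == -ERESTARTSYS)
			return ret;		/* fatal signal while waiting */
		return 0;			/* ready, with jiffies to spare */
	}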
#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index c102ef65cb64..1c49431f3121 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -324,6 +324,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
+ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 298f996969df..a4f7429c4ae5 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -57,6 +57,7 @@ void cec_notifier_put(struct cec_notifier *n);
* @pa: the CEC physical address
*
* Set a new CEC physical address.
+ * Does nothing if @n == NULL.
*/
void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa);
@@ -66,6 +67,7 @@ void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa);
* @edid: the struct edid pointer
*
* Parses the EDID to obtain the new CEC physical address and set it.
+ * Does nothing if @n == NULL.
*/
void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
const struct edid *edid);
@@ -118,4 +120,17 @@ static inline void cec_notifier_unregister(struct cec_notifier *n)
#endif
+/**
+ * cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
+ *
+ * @n: the CEC notifier
+ *
+ * This is a simple helper function to invalidate the physical
+ * address. Does nothing if @n == NULL.
+ */
+static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n)
+{
+ cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID);
+}
+
#endif
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 7c909da29d43..6ea2ce241851 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -103,16 +103,6 @@ struct ccdc_black_compensation {
char gb;
};
-/* structure for fault pixel correction */
-struct ccdc_fault_pixel {
- /* Enable or Disable fault pixel correction */
- unsigned char enable;
- /* Number of fault pixel */
- unsigned short fp_num;
- /* Address of fault pixel table */
- unsigned long fpc_table_addr;
-};
-
/* Structure for CCDC configuration parameters for raw capture mode passed
* by application
*/
@@ -125,8 +115,6 @@ struct ccdc_config_params_raw {
struct ccdc_black_clamp blk_clamp;
/* Structure for Black Compensation */
struct ccdc_black_compensation blk_comp;
- /* Structure for Fault Pixel Module Configuration */
- struct ccdc_fault_pixel fault_pxl;
};
diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h
index 8e1a4d88daa0..f003533602d0 100644
--- a/include/media/davinci/vpfe_capture.h
+++ b/include/media/davinci/vpfe_capture.h
@@ -183,14 +183,4 @@ struct vpfe_config_params {
};
#endif /* End of __KERNEL__ */
-/**
- * VPFE_CMD_S_CCDC_RAW_PARAMS - EXPERIMENTAL IOCTL to set raw capture params
- * This can be used to configure modules such as defect pixel correction,
- * color space conversion, culling etc. This is an experimental ioctl that
- * will change in future kernels. So use this ioctl with care !
- * TODO: This is to be split into multiple ioctls and also explore the
- * possibility of extending the v4l2 api to include this
- **/
-#define VPFE_CMD_S_CCDC_RAW_PARAMS _IOW('V', BASE_VIDIOC_PRIVATE + 1, \
- void *)
#endif /* _DAVINCI_VPFE_H */
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index c837383b2013..68a8abe4fac5 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -34,11 +34,12 @@ struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
- void (*callback)(void *);
+ void (*callback)(void *, bool);
void *callback_data;
};
-int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
+int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_lif_config *cfg);
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -50,10 +51,11 @@ struct vsp1_du_atomic_config {
unsigned int zpos;
};
-void vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf,
+void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index);
+int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
+ unsigned int rpf,
const struct vsp1_du_atomic_config *cfg);
-void vsp1_du_atomic_flush(struct device *dev);
+void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index);
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
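
A sketch of the DU-side call sequence after the prototype changes above, showing where the new pipe_index argument is threaded through; the helper name, pipe index and RPF index are placeholders:

#include <media/vsp1.h>

/* Hypothetical per-CRTC commit path; pipe and RPF indices are placeholders. */
static void du_commit_plane(struct device *vsp1_dev, unsigned int pipe_index,
			    const struct vsp1_du_atomic_config *cfg)
{
	vsp1_du_atomic_begin(vsp1_dev, pipe_index);
	/* RPF 0 chosen arbitrarily for the sketch. */
	vsp1_du_atomic_update(vsp1_dev, pipe_index, 0, cfg);
	vsp1_du_atomic_flush(vsp1_dev, pipe_index);
}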
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 26ffd8333f50..8f3d5d8b5ae0 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -10,12 +10,9 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-
-struct tcf_hashinfo {
- struct hlist_head *htab;
- unsigned int hmask;
- spinlock_t lock;
- u32 index;
+struct tcf_idrinfo {
+ spinlock_t lock;
+ struct idr action_idr;
};
struct tc_action_ops;
@@ -25,9 +22,8 @@ struct tc_action {
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
struct list_head list;
- struct tcf_hashinfo *hinfo;
+ struct tcf_idrinfo *idrinfo;
- struct hlist_node tcfa_head;
u32 tcfa_index;
int tcfa_refcnt;
int tcfa_bindcnt;
@@ -44,7 +40,6 @@ struct tc_action {
struct tc_cookie *act_cookie;
struct tcf_chain *goto_chain;
};
-#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
@@ -57,27 +52,6 @@ struct tc_action {
#define tcf_lock common.tcfa_lock
#define tcf_rcu common.tcfa_rcu
-static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
-{
- return index & hmask;
-}
-
-static inline int tcf_hashinfo_init(struct tcf_hashinfo *hf, unsigned int mask)
-{
- int i;
-
- spin_lock_init(&hf->lock);
- hf->index = 0;
- hf->hmask = mask;
- hf->htab = kzalloc((mask + 1) * sizeof(struct hlist_head),
- GFP_KERNEL);
- if (!hf->htab)
- return -ENOMEM;
- for (i = 0; i < mask + 1; i++)
- INIT_HLIST_HEAD(&hf->htab[i]);
- return 0;
-}
-
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
@@ -126,53 +100,51 @@ struct tc_action_ops {
};
struct tc_action_net {
- struct tcf_hashinfo *hinfo;
+ struct tcf_idrinfo *idrinfo;
const struct tc_action_ops *ops;
};
static inline
int tc_action_net_init(struct tc_action_net *tn,
- const struct tc_action_ops *ops, unsigned int mask)
+ const struct tc_action_ops *ops)
{
int err = 0;
- tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL);
- if (!tn->hinfo)
+ tn->idrinfo = kmalloc(sizeof(*tn->idrinfo), GFP_KERNEL);
+ if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
- err = tcf_hashinfo_init(tn->hinfo, mask);
- if (err)
- kfree(tn->hinfo);
+ spin_lock_init(&tn->idrinfo->lock);
+ idr_init(&tn->idrinfo->action_idr);
return err;
}
-void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
- struct tcf_hashinfo *hinfo);
+void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
+ struct tcf_idrinfo *idrinfo);
static inline void tc_action_net_exit(struct tc_action_net *tn)
{
- tcf_hashinfo_destroy(tn->ops, tn->hinfo);
- kfree(tn->hinfo);
+ tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+ kfree(tn->idrinfo);
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops);
-int tcf_hash_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-u32 tcf_hash_new_index(struct tc_action_net *tn);
-bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
+bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
int bind);
-int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
- struct tc_action **a, const struct tc_action_ops *ops, int bind,
- bool cpustats);
-void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
-void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a);
+int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
+ struct tc_action **a, const struct tc_action_ops *ops,
+ int bind, bool cpustats);
+void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est);
+void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
-int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
+int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-static inline int tcf_hash_release(struct tc_action *a, bool bind)
+static inline int tcf_idr_release(struct tc_action *a, bool bind)
{
- return __tcf_hash_release(a, bind, false);
+ return __tcf_idr_release(a, bind, false);
}
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
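
A sketch of what an action's pernet init/exit pair looks like with the new two-argument tc_action_net_init(): the hash mask parameter is gone because the IDR sizes itself. The pernet id and ops below are hypothetical and their fields are omitted:

#include <net/act_api.h>
#include <net/netns/generic.h>

static unsigned int foo_net_id;			/* hypothetical pernet id */
static struct tc_action_ops act_foo_ops;	/* hypothetical; fields omitted */

static __net_init int foo_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, foo_net_id);

	/* No hash mask any more: the IDR grows on demand. */
	return tc_action_net_init(tn, &act_foo_ops);
}

static void __net_exit foo_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, foo_net_id);

	tc_action_net_exit(tn);
}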
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 6df79e96a780..f44ff2476758 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -336,6 +336,16 @@ static inline void in6_dev_put(struct inet6_dev *idev)
in6_dev_finish_destroy(idev);
}
+static inline void in6_dev_put_clear(struct inet6_dev **pidev)
+{
+ struct inet6_dev *idev = *pidev;
+
+ if (idev) {
+ in6_dev_put(idev);
+ *pidev = NULL;
+ }
+}
+
static inline void __in6_dev_put(struct inet6_dev *idev)
{
refcount_dec(&idev->refcnt);
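
A minimal sketch of the pattern the new in6_dev_put_clear() helper is meant for; the containing structure and teardown function are hypothetical:

#include <net/addrconf.h>

/* Hypothetical teardown path holding a cached inet6_dev reference. */
struct foo_state {
	struct inet6_dev *idev;
};

static void foo_teardown(struct foo_state *st)
{
	/*
	 * Drops the reference (if any) and NULLs the pointer in one step,
	 * so a repeated teardown pass cannot double-put it.
	 */
	in6_dev_put_clear(&st->idev);
}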
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index c172709787af..3ac79150291f 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -19,8 +19,22 @@ struct sock;
struct socket;
struct rxrpc_call;
+/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED, /* - Normal termination */
+ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
+ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
+ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
+ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
+ NR__RXRPC_CALL_COMPLETIONS
+};
+
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
+typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
unsigned long);
typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
@@ -37,7 +51,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
gfp_t,
rxrpc_notify_rx_t);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
- struct msghdr *, size_t);
+ struct msghdr *, size_t,
+ rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
void *, size_t, size_t *, bool, u32 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
@@ -48,5 +63,9 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
+int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
+ struct sockaddr_rxrpc *, struct key *);
+int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
+ enum rxrpc_call_completion *, u32 *);
#endif /* _NET_RXRPC_H */
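
A sketch of passing the new end-of-Tx notification into rxrpc_kernel_send_data(); the callback and wrapper below are hypothetical names, and the socket, call and msghdr are assumed to have been set up elsewhere:

#include <net/af_rxrpc.h>

/* Hypothetical completion hook invoked when the call's Tx phase ends. */
static void foo_notify_end_tx(struct sock *sk, struct rxrpc_call *call,
			      unsigned long user_call_ID)
{
	/* e.g. mark the caller's request structure as fully queued */
}

static int foo_send(struct socket *sock, struct rxrpc_call *call,
		    struct msghdr *msg, size_t len)
{
	/* The extra argument is the new rxrpc_notify_end_tx_t callback. */
	return rxrpc_kernel_send_data(sock, call, msg, len,
				      foo_notify_end_tx);
}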
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 678e4d6fa317..afb37f835449 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -37,7 +37,7 @@ struct unix_skb_parms {
u32 secid; /* Security ID */
#endif
u32 consumed;
-};
+} __randomize_layout;
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
@@ -58,7 +58,6 @@ struct unix_sock {
struct list_head link;
atomic_long_t inflight;
spinlock_t lock;
- unsigned char recursion_level;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
#define UNIX_GC_MAYBE_CYCLE 1
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 01487192f628..020142bb9735 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -233,7 +233,7 @@ static inline void bacpy(bdaddr_t *dst, const bdaddr_t *src)
memcpy(dst, src, sizeof(bdaddr_t));
}
-void baswap(bdaddr_t *dst, bdaddr_t *src);
+void baswap(bdaddr_t *dst, const bdaddr_t *src);
/* Common socket structures and functions */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b00508d22e0a..b2e68657a216 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
BOND_MODE(bond) == BOND_MODE_ALB;
}
+static inline bool bond_needs_speed_duplex(const struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
+}
+
static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{
return (BOND_MODE(bond) == BOND_MODE_TLB) &&
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8ffd434676b7..71c72a939bf8 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -29,18 +29,18 @@
#include <linux/sched/signal.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_busy_read __read_mostly;
-extern unsigned int sysctl_net_busy_poll __read_mostly;
-
/* 0 - Reserved to indicate value not set
* 1..NR_CPUS - Reserved for sender_cpu
* NR_CPUS+1..~0 - Region available for NAPI IDs
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
static inline bool net_busy_loop_on(void)
{
return sysctl_net_busy_poll;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index ed7687bbf5d0..b9654e133599 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -178,7 +178,6 @@ struct devlink_dpipe_table_ops;
* struct devlink_dpipe_table - table object
* @priv: private
* @name: table name
- * @size: maximum number of entries
* @counters_enabled: indicates if counters are active
* @counter_control_extern: indicates if counter control is in dpipe or
* external tool
@@ -189,7 +188,6 @@ struct devlink_dpipe_table {
void *priv;
struct list_head list;
const char *name;
- u64 size;
bool counters_enabled;
bool counter_control_extern;
struct devlink_dpipe_table_ops *table_ops;
@@ -204,6 +202,7 @@ struct devlink_dpipe_table {
 * @counters_set_update - when changing the counter status, a hardware sync
 * may be needed to allocate/free counter related
 * resources
+ * @size_get - get the table size
 * @counters_set_update - when changing the counter status, a hardware sync
 * may be needed to allocate/free counter related
 * resources
+ * @size_get - get the table size
*/
struct devlink_dpipe_table_ops {
int (*actions_dump)(void *priv, struct sk_buff *skb);
@@ -211,6 +210,7 @@ struct devlink_dpipe_table_ops {
int (*entries_dump)(void *priv, bool counters_enabled,
struct devlink_dpipe_dump_ctx *dump_ctx);
int (*counters_set_update)(void *priv, bool enable);
+ u64 (*size_get)(void *priv);
};
/**
@@ -311,8 +311,7 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
int devlink_dpipe_table_register(struct devlink *devlink,
const char *table_name,
struct devlink_dpipe_table_ops *table_ops,
- void *priv, u64 size,
- bool counter_control_extern);
+ void *priv, bool counter_control_extern);
void devlink_dpipe_table_unregister(struct devlink *devlink,
const char *table_name);
int devlink_dpipe_headers_register(struct devlink *devlink,
@@ -324,10 +323,14 @@ int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
struct devlink_dpipe_entry *entry);
int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx);
+void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry);
int devlink_dpipe_action_put(struct sk_buff *skb,
struct devlink_dpipe_action *action);
int devlink_dpipe_match_put(struct sk_buff *skb,
struct devlink_dpipe_match *match);
+extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
+extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
+extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
#else
@@ -400,8 +403,7 @@ static inline int
devlink_dpipe_table_register(struct devlink *devlink,
const char *table_name,
struct devlink_dpipe_table_ops *table_ops,
- void *priv, u64 size,
- bool counter_control_extern)
+ void *priv, bool counter_control_extern)
{
return 0;
}
@@ -447,6 +449,11 @@ devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
return 0;
}
+static inline void
+devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
+{
+}
+
static inline int
devlink_dpipe_action_put(struct sk_buff *skb,
struct devlink_dpipe_action *action)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 58969b9a090c..dd44d6ce1097 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -101,6 +101,14 @@ struct dsa_platform_data {
struct packet_type;
+struct dsa_device_ops {
+ struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt);
+ int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
+};
+
struct dsa_switch_tree {
struct list_head list;
@@ -125,8 +133,7 @@ struct dsa_switch_tree {
/* Copy of tag_ops->rcv for faster access in hot path */
struct sk_buff * (*rcv)(struct sk_buff *skb,
struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev);
+ struct packet_type *pt);
/*
* The switch port to which the CPU is attached.
@@ -236,6 +243,9 @@ struct dsa_switch {
/* devlink used to represent this switch device */
struct devlink *devlink;
+ /* Number of switch port queues */
+ unsigned int num_tx_queues;
+
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
@@ -256,11 +266,6 @@ static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
}
-static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
-{
- return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
-}
-
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
{
struct dsa_switch_tree *dst = ds->dst;
@@ -277,6 +282,8 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
return ds->rtable[dst->cpu_dp->ds->index];
}
+typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
+ bool is_static, void *data);
struct dsa_switch_ops {
/*
* Legacy probing.
@@ -337,13 +344,12 @@ struct dsa_switch_ops {
struct phy_device *phy);
/*
- * EEE setttings
+ * Port's MAC EEE settings
*/
- int (*set_eee)(struct dsa_switch *ds, int port,
- struct phy_device *phydev,
- struct ethtool_eee *e);
- int (*get_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ int (*set_mac_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
+ int (*get_mac_eee)(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
@@ -384,24 +390,15 @@ struct dsa_switch_ops {
struct switchdev_trans *trans);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
- int (*port_vlan_dump)(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb);
-
/*
* Forwarding database
*/
- int (*port_fdb_prepare)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
- void (*port_fdb_add)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
+ int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb);
+ const unsigned char *addr, u16 vid);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb);
+ dsa_fdb_dump_cb_t *cb, void *data);
/*
* Multicast database
@@ -414,10 +411,6 @@ struct dsa_switch_ops {
struct switchdev_trans *trans);
int (*port_mdb_del)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb);
- int (*port_mdb_dump)(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_mdb *mdb,
- switchdev_obj_dump_cb_t *cb);
-
/*
* RXNFC
*/
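
A sketch of a driver implementing the reworked FDB ops above: port_fdb_add now takes a plain address/vid pair (no switchdev prepare/commit), and port_fdb_dump pushes entries through the new dsa_fdb_dump_cb_t callback. The driver functions and the single hard-coded entry are hypothetical:

#include <linux/if_ether.h>
#include <net/dsa.h>

static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
			     dsa_fdb_dump_cb_t *cb, void *data)
{
	/* A real driver would loop over the hardware FDB here. */
	static const unsigned char addr[ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return cb(addr, 0 /* vid */, true /* is_static */, data);
}

static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid)
{
	/* Program the hardware entry directly. */
	return 0;
}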
diff --git a/include/net/dst.h b/include/net/dst.h
index f73611ec4017..93568bd0a352 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
+#include <linux/refcount.h>
#include <net/neighbour.h>
#include <asm/processor.h>
@@ -107,7 +108,7 @@ struct dst_entry {
struct dst_metrics {
u32 metrics[RTAX_MAX];
- atomic_t refcnt;
+ refcount_t refcnt;
};
extern const struct dst_metrics dst_default_metrics;
diff --git a/include/net/erspan.h b/include/net/erspan.h
new file mode 100644
index 000000000000..ca94fc86865e
--- /dev/null
+++ b/include/net/erspan.h
@@ -0,0 +1,61 @@
+#ifndef __LINUX_ERSPAN_H
+#define __LINUX_ERSPAN_H
+
+/*
+ * GRE header for ERSPAN encapsulation (8 octets [34:41]) -- 8 bytes
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0|0|0|1|0|00000|000000000|00000| Protocol Type for ERSPAN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sequence Number (increments per packet per session) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Note that in the above GRE header [RFC1701] out of the C, R, K, S,
+ * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
+ * other fields are set to zero, so only a sequence number follows.
+ *
+ * ERSPAN Type II header (8 octets [42:49])
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ver | VLAN | COS | En|T| Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | Index |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
+ */
+
+#define ERSPAN_VERSION 0x1
+
+#define VER_MASK 0xf000
+#define VLAN_MASK 0x0fff
+#define COS_MASK 0xe000
+#define EN_MASK 0x1800
+#define T_MASK 0x0400
+#define ID_MASK 0x03ff
+#define INDEX_MASK 0xfffff
+
+enum erspan_encap_type {
+ ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
+ ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
+ ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */
+	ERSPAN_ENCAP_INFRAME = 0x3,	/* VLAN tag preserved in frame */
+};
+
+struct erspan_metadata {
+ __be32 index; /* type II */
+};
+
+struct erspanhdr {
+ __be16 ver_vlan;
+#define VER_OFFSET 12
+ __be16 session_id;
+#define COS_OFFSET 13
+#define EN_OFFSET 11
+#define T_OFFSET 10
+ struct erspan_metadata md;
+};
+
+#endif
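
To make the field layout above concrete, a sketch of pulling the ERSPAN Type II fields out of a received header using the masks and offsets defined in the new file; the parse helper itself is hypothetical:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <net/erspan.h>

static void erspan_parse(const struct erspanhdr *ershdr,
			 u8 *ver, u16 *vlan, u16 *session_id, u32 *index)
{
	u16 ver_vlan = ntohs(ershdr->ver_vlan);
	u16 sid = ntohs(ershdr->session_id);

	*ver = (ver_vlan & VER_MASK) >> VER_OFFSET;	/* expected to be 1 */
	*vlan = ver_vlan & VLAN_MASK;
	*session_id = sid & ID_MASK;
	*index = ntohl(ershdr->md.index) & INDEX_MASK;
}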
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
new file mode 100644
index 000000000000..669b9716dc7a
--- /dev/null
+++ b/include/net/fib_notifier.h
@@ -0,0 +1,46 @@
+#ifndef __NET_FIB_NOTIFIER_H
+#define __NET_FIB_NOTIFIER_H
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+
+struct fib_notifier_info {
+ struct net *net;
+ int family;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_REPLACE,
+ FIB_EVENT_ENTRY_APPEND,
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+ FIB_EVENT_NH_ADD,
+ FIB_EVENT_NH_DEL,
+};
+
+struct fib_notifier_ops {
+ int family;
+ struct list_head list;
+ unsigned int (*fib_seq_read)(struct net *net);
+ int (*fib_dump)(struct net *net, struct notifier_block *nb);
+ struct module *owner;
+ struct rcu_head rcu;
+};
+
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int register_fib_notifier(struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb));
+int unregister_fib_notifier(struct notifier_block *nb);
+struct fib_notifier_ops *
+fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net);
+void fib_notifier_ops_unregister(struct fib_notifier_ops *ops);
+
+#endif
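
A sketch of the per-family glue a user of fib_notifier_ops_register() would provide, following the template shape declared above. The family value, function names and trivial bodies are hypothetical placeholders:

#include <linux/err.h>
#include <linux/socket.h>
#include <net/fib_notifier.h>

static unsigned int foo_fib_seq_read(struct net *net)
{
	return 0;	/* would return the family's FIB change counter */
}

static int foo_fib_dump(struct net *net, struct notifier_block *nb)
{
	return 0;	/* would replay the current entries to @nb */
}

static const struct fib_notifier_ops foo_fib_notifier_ops_template = {
	.family		= AF_INET,	/* placeholder family */
	.fib_seq_read	= foo_fib_seq_read,
	.fib_dump	= foo_fib_dump,
	.owner		= THIS_MODULE,
};

static int foo_fib_notifier_init(struct net *net, struct fib_notifier_ops **ops)
{
	*ops = fib_notifier_ops_register(&foo_fib_notifier_ops_template, net);
	return PTR_ERR_OR_ZERO(*ops);
}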
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index c487bfa2f479..3d7f1cefc6f5 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -8,6 +8,7 @@
#include <linux/refcount.h>
#include <net/flow.h>
#include <net/rtnetlink.h>
+#include <net/fib_notifier.h>
struct fib_kuid_range {
kuid_t start;
@@ -57,6 +58,7 @@ struct fib_rules_ops {
int addr_size;
int unresolved_rules;
int nr_goto_rules;
+ unsigned int fib_rules_seq;
int (*action)(struct fib_rule *,
struct flowi *, int,
@@ -89,6 +91,11 @@ struct fib_rules_ops {
struct rcu_head rcu;
};
+struct fib_rule_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct fib_rule *rule;
+};
+
#define FRA_GENERIC_POLICY \
[FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
[FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
@@ -143,6 +150,8 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
bool fib_rule_matchall(const struct fib_rule *rule);
+int fib_rules_dump(struct net *net, struct notifier_block *nb, int family);
+unsigned int fib_rules_seq_read(struct net *net, int family);
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
diff --git a/include/net/flow.h b/include/net/flow.h
index bae198b3039e..eb60cee30b44 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -149,6 +149,7 @@ struct flowi6 {
#define fl6_ipsec_spi uli.spi
#define fl6_mh_type uli.mht.type
#define fl6_gre_key uli.gre_key
+ __u32 mp_hash;
} __attribute__((__aligned__(BITS_PER_LONG/8)));
struct flowidn {
@@ -218,40 +219,6 @@ static inline unsigned int flow_key_size(u16 family)
return 0;
}
-#define FLOW_DIR_IN 0
-#define FLOW_DIR_OUT 1
-#define FLOW_DIR_FWD 2
-
-struct net;
-struct sock;
-struct flow_cache_ops;
-
-struct flow_cache_object {
- const struct flow_cache_ops *ops;
-};
-
-struct flow_cache_ops {
- struct flow_cache_object *(*get)(struct flow_cache_object *);
- int (*check)(struct flow_cache_object *);
- void (*delete)(struct flow_cache_object *);
-};
-
-typedef struct flow_cache_object *(*flow_resolve_t)(
- struct net *net, const struct flowi *key, u16 family,
- u8 dir, struct flow_cache_object *oldobj, void *ctx);
-
-struct flow_cache_object *flow_cache_lookup(struct net *net,
- const struct flowi *key, u16 family,
- u8 dir, flow_resolve_t resolver,
- void *ctx);
-int flow_cache_init(struct net *net);
-void flow_cache_fini(struct net *net);
-void flow_cache_hp_init(void);
-
-void flow_cache_flush(struct net *net);
-void flow_cache_flush_deferred(struct net *net);
-extern atomic_t flow_cache_genid;
-
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index e2663e900b0a..fc3dce730a6b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -19,6 +19,14 @@ struct flow_dissector_key_control {
#define FLOW_DIS_FIRST_FRAG BIT(1)
#define FLOW_DIS_ENCAPSULATION BIT(2)
+enum flow_dissect_ret {
+ FLOW_DISSECT_RET_OUT_GOOD,
+ FLOW_DISSECT_RET_OUT_BAD,
+ FLOW_DISSECT_RET_PROTO_AGAIN,
+ FLOW_DISSECT_RET_IPPROTO_AGAIN,
+ FLOW_DISSECT_RET_CONTINUE,
+};
+
/**
* struct flow_dissector_key_basic:
* @thoff: Transport header offset
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
deleted file mode 100644
index 51eb971e8973..000000000000
--- a/include/net/flowcache.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _NET_FLOWCACHE_H
-#define _NET_FLOWCACHE_H
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-
-struct flow_cache_percpu {
- struct hlist_head *hash_table;
- unsigned int hash_count;
- u32 hash_rnd;
- int hash_rnd_recalc;
- struct tasklet_struct flush_tasklet;
-};
-
-struct flow_cache {
- u32 hash_shift;
- struct flow_cache_percpu __percpu *percpu;
- struct hlist_node node;
- unsigned int low_watermark;
- unsigned int high_watermark;
- struct timer_list rnd_timer;
-};
-#endif /* _NET_FLOWCACHE_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index b87becacd9d3..6e91e38a31da 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -49,7 +49,8 @@ struct sock *__inet6_lookup_established(struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
- const u16 hnum, const int dif);
+ const u16 hnum, const int dif,
+ const int sdif);
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -57,7 +58,8 @@ struct sock *inet6_lookup_listener(struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
- const unsigned short hnum, const int dif);
+ const unsigned short hnum,
+ const int dif, const int sdif);
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -66,24 +68,25 @@ static inline struct sock *__inet6_lookup(struct net *net,
const __be16 sport,
const struct in6_addr *daddr,
const u16 hnum,
- const int dif,
+ const int dif, const int sdif,
bool *refcounted)
{
struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
- sport, daddr, hnum, dif);
+ sport, daddr, hnum,
+ dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, sdif);
}
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
- int iif,
+ int iif, int sdif,
bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
@@ -95,7 +98,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
doff, &ipv6_hdr(skb)->saddr, sport,
&ipv6_hdr(skb)->daddr, ntohs(dport),
- iif, refcounted);
+ iif, sdif, refcounted);
}
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
@@ -107,13 +110,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
int inet6_hash(struct sock *sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
+#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
(!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
+ ((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 6fdcd2427776..fc59e0775e00 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/percpu_counter.h>
-
struct netns_frags {
- /* The percpu_counter "mem" need to be cacheline aligned.
- * mem.count must not share cacheline with other writers
- */
- struct percpu_counter mem ____cacheline_aligned_in_smp;
-
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
@@ -108,15 +103,10 @@ struct inet_frags {
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
{
- percpu_counter_destroy(&nf->mem);
+ atomic_set(&nf->mem, 0);
}
-
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,31 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
/* Memory Tracking Functions. */
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
static inline int frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_read(&nf->mem);
+ return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
+ atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
+ atomic_add(i, &nf->mem);
}
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_sum_positive(&nf->mem);
+ return atomic_read(&nf->mem);
}
/* RFC 3168 support :
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 5026b1f08bb8..2dbbbff5e1e3 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -221,16 +221,16 @@ struct sock *__inet_lookup_listener(struct net *net,
const __be32 saddr, const __be16 sport,
const __be32 daddr,
const unsigned short hnum,
- const int dif);
+ const int dif, const int sdif);
static inline struct sock *inet_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport, int dif)
+ __be32 daddr, __be16 dport, int dif, int sdif)
{
return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
- daddr, ntohs(dport), dif);
+ daddr, ntohs(dport), dif, sdif);
}
/* Socket demux engine toys. */
@@ -262,22 +262,24 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__force __u64)(__be32)(__daddr)) << 32) | \
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_addrpair == (__cookie)) && \
(!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
+ ((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const int __name __deprecated __attribute__((unused))
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
+#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_daddr == (__saddr)) && \
((__sk)->sk_rcv_saddr == (__daddr)) && \
(!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
+ ((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
@@ -288,7 +290,7 @@ struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
- const int dif);
+ const int dif, const int sdif);
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -297,7 +299,7 @@ static inline struct sock *
const int dif)
{
return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
- ntohs(dport), dif);
+ ntohs(dport), dif, 0);
}
static inline struct sock *__inet_lookup(struct net *net,
@@ -305,20 +307,20 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
- const int dif,
+ const int dif, const int sdif,
bool *refcounted)
{
u16 hnum = ntohs(dport);
struct sock *sk;
sk = __inet_lookup_established(net, hashinfo, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
- sport, daddr, hnum, dif);
+ sport, daddr, hnum, dif, sdif);
}
static inline struct sock *inet_lookup(struct net *net,
@@ -332,7 +334,7 @@ static inline struct sock *inet_lookup(struct net *net,
bool refcounted;
sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
- dport, dif, &refcounted);
+ dport, dif, 0, &refcounted);
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
@@ -344,6 +346,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
int doff,
const __be16 sport,
const __be16 dport,
+ const int sdif,
bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
@@ -355,7 +358,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
doff, iph->saddr, sport,
- iph->daddr, dport, inet_iif(skb),
+ iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index f2a215fc78e4..950ed182f62f 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -33,18 +33,12 @@ struct inetpeer_addr {
};
struct inet_peer {
- /* group together avl_left,avl_right,v4daddr to speedup lookups */
- struct inet_peer __rcu *avl_left, *avl_right;
+ struct rb_node rb_node;
struct inetpeer_addr daddr;
- __u32 avl_height;
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
unsigned long rate_last;
- union {
- struct list_head gc_list;
- struct rcu_head gc_rcu;
- };
/*
* Once inet_peer is queued for deletion (refcnt == 0), following field
* is not available: rid
@@ -55,7 +49,6 @@ struct inet_peer {
atomic_t rid; /* Frag reception counter */
};
struct rcu_head rcu;
- struct inet_peer *gc_next;
};
/* following fields might be frequently dirtied */
@@ -64,7 +57,7 @@ struct inet_peer {
};
struct inet_peer_base {
- struct inet_peer __rcu *root;
+ struct rb_root rb_root;
seqlock_t lock;
int total;
};
diff --git a/include/net/ip.h b/include/net/ip.h
index 821cedcc8e73..9896f46cbbf1 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -78,6 +78,16 @@ struct ipcm_cookie {
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
+/* return enslaved device index if relevant */
+static inline int inet_sdif(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
+ return IPCB(skb)->iif;
+#endif
+ return 0;
+}
+
struct ip_ra_chain {
struct ip_ra_chain __rcu *next;
struct sock *sk;
@@ -352,7 +362,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
!forwarding)
return dst_mtu(dst);
- return min(dst->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@@ -364,7 +374,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
u32 ip_idents_reserve(u32 hash, int segs);
@@ -567,11 +577,12 @@ int ip_forward(struct sk_buff *skb);
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
-int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
- const struct ip_options *sopt);
-static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+int __ip_options_echo(struct net *net, struct ip_options *dopt,
+ struct sk_buff *skb, const struct ip_options *sopt);
+static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
+ struct sk_buff *skb)
{
- return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
+ return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}
void ip_options_fragment(struct sk_buff *skb);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 1a88008cc6f5..d060d711a624 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -16,10 +16,12 @@
#include <linux/ipv6_route.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
+#include <linux/notifier.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/netlink.h>
#include <net/inetpeer.h>
+#include <net/fib_notifier.h>
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
@@ -70,6 +72,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
+ struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@@ -104,7 +107,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
- struct fib6_node *rt6i_node;
+ struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@@ -118,6 +121,8 @@ struct rt6_info {
atomic_t rt6i_ref;
+ unsigned int rt6i_nh_flags;
+
/* These are in a separate cache line. */
struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
u32 rt6i_flags;
@@ -167,13 +172,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
+/* Safely read fn->sernum for the passed-in rt and store the result in the
+ * passed-in cookie.
+ * Returns true if the cookie could be obtained safely, false otherwise.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+
+ if (fn) {
+ *cookie = fn->fn_sernum;
+ status = true;
+ }
+
+ rcu_read_unlock();
+ return status;
+}
+
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ u32 cookie = 0;
+
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ rt6_get_cookie_safe(rt, &cookie);
+
+ return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
@@ -185,6 +217,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
dst_release(&rt->dst);
}
+void rt6_free_pcpu(struct rt6_info *non_pcpu_rt);
+
+static inline void rt6_hold(struct rt6_info *rt)
+{
+ atomic_inc(&rt->rt6i_ref);
+}
+
+static inline void rt6_release(struct rt6_info *rt)
+{
+ if (atomic_dec_and_test(&rt->rt6i_ref)) {
+ rt6_free_pcpu(rt);
+ dst_dev_put(&rt->dst);
+ dst_release(&rt->dst);
+ }
+}
+
enum fib6_walk_state {
#ifdef CONFIG_IPV6_SUBTREES
FWS_S,
@@ -233,6 +281,7 @@ struct fib6_table {
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
+ unsigned int fib_seq;
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -256,6 +305,11 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
struct fib6_table *,
struct flowi6 *, int);
+struct fib6_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct rt6_info *rt;
+};
+
/*
* exported functions
*/
@@ -292,9 +346,24 @@ int fib6_init(void);
int ipv6_route_open(struct inode *inode, struct file *file);
+int call_fib6_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
+int __net_init fib6_notifier_init(struct net *net);
+void __net_exit fib6_notifier_exit(struct net *net);
+
+unsigned int fib6_tables_seq_read(struct net *net);
+int fib6_tables_dump(struct net *net, struct notifier_block *nb);
+
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
void fib6_rules_cleanup(void);
+bool fib6_rule_default(const struct fib_rule *rule);
+int fib6_rules_dump(struct net *net, struct notifier_block *nb);
+unsigned int fib6_rules_seq_read(struct net *net);
#else
static inline int fib6_rules_init(void)
{
@@ -304,5 +373,17 @@ static inline void fib6_rules_cleanup(void)
{
return ;
}
+static inline bool fib6_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb)
+{
+ return 0;
+}
+static inline unsigned int fib6_rules_seq_read(struct net *net)
+{
+ return 0;
+}
#endif
#endif
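
A sketch of checking a cached route's validity via the new rt6_get_cookie_safe(), which replaces bare rt6i_node dereferences now that the pointer is RCU-protected; the wrapper below is hypothetical:

#include <net/ip6_fib.h>

static bool foo_rt6_still_valid(const struct rt6_info *rt, u32 cached_cookie)
{
	u32 cookie;

	/* Fails if the route was already unlinked from the FIB tree. */
	if (!rt6_get_cookie_safe(rt, &cookie))
		return false;

	return cookie == cached_cookie;
}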
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 199056933dcb..ee96f402cb75 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -115,6 +115,7 @@ static inline int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
const struct in6_addr *saddr, int oif, int flags);
+u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb);
struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
@@ -163,6 +164,16 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
+{
+ const struct dst_entry *dst = skb_dst(skb);
+ const struct rt6_info *rt6 = NULL;
+
+ if (dst)
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ return rt6;
+}
/*
* Store a destination cache entry in a socket
@@ -194,7 +205,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
struct rt6_info *rt = (struct rt6_info *)dst;
return rt->rt6i_flags & RTF_ANYCAST ||
- (rt->rt6i_dst.plen != 128 &&
+ (rt->rt6i_dst.plen < 127 &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 41d580c6185f..1a7f7e424320 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -19,6 +19,7 @@
#include <net/flow.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
+#include <net/fib_notifier.h>
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
@@ -124,7 +125,6 @@ struct fib_info {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
#endif
- unsigned int fib_offload_cnt;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
@@ -177,18 +177,6 @@ struct fib_result_nl {
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
-static inline void fib_info_offload_inc(struct fib_info *fi)
-{
- fi->fib_offload_cnt++;
- fi->fib_flags |= RTNH_F_OFFLOAD;
-}
-
-static inline void fib_info_offload_dec(struct fib_info *fi)
-{
- if (--fi->fib_offload_cnt == 0)
- fi->fib_flags &= ~RTNH_F_OFFLOAD;
-}
-
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -201,10 +189,6 @@ static inline void fib_info_offload_dec(struct fib_info *fi)
#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
FIB_RES_SADDR(net, res))
-struct fib_notifier_info {
- struct net *net;
-};
-
struct fib_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
u32 dst;
@@ -215,44 +199,21 @@ struct fib_entry_notifier_info {
u32 tb_id;
};
-struct fib_rule_notifier_info {
- struct fib_notifier_info info; /* must be first */
- struct fib_rule *rule;
-};
-
struct fib_nh_notifier_info {
struct fib_notifier_info info; /* must be first */
struct fib_nh *fib_nh;
};
-enum fib_event_type {
- FIB_EVENT_ENTRY_REPLACE,
- FIB_EVENT_ENTRY_APPEND,
- FIB_EVENT_ENTRY_ADD,
- FIB_EVENT_ENTRY_DEL,
- FIB_EVENT_RULE_ADD,
- FIB_EVENT_RULE_DEL,
- FIB_EVENT_NH_ADD,
- FIB_EVENT_NH_DEL,
-};
-
-int register_fib_notifier(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb));
-int unregister_fib_notifier(struct notifier_block *nb);
-int call_fib_notifier(struct notifier_block *nb, struct net *net,
- enum fib_event_type event_type,
- struct fib_notifier_info *info);
-int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+int call_fib4_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
struct fib_notifier_info *info);
+int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
+int __net_init fib4_notifier_init(struct net *net);
+void __net_exit fib4_notifier_exit(struct net *net);
void fib_notify(struct net *net, struct notifier_block *nb);
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-void fib_rules_notify(struct net *net, struct notifier_block *nb);
-#else
-static inline void fib_rules_notify(struct net *net, struct notifier_block *nb)
-{
-}
-#endif
struct fib_table {
struct hlist_node tb_hlist;
@@ -325,6 +286,16 @@ static inline bool fib4_rule_default(const struct fib_rule *rule)
return true;
}
+static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline unsigned int fib4_rules_seq_read(struct net *net)
+{
+ return 0;
+}
+
#else /* CONFIG_IP_MULTIPLE_TABLES */
int __net_init fib4_rules_init(struct net *net);
void __net_exit fib4_rules_exit(struct net *net);
@@ -370,6 +341,8 @@ out:
}
bool fib4_rule_default(const struct fib_rule *rule);
+int fib4_rules_dump(struct net *net, struct notifier_block *nb);
+unsigned int fib4_rules_seq_read(struct net *net);
#endif /* CONFIG_IP_MULTIPLE_TABLES */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 520809912f03..992652856fe8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -115,6 +115,9 @@ struct ip_tunnel {
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
+ /* This field used only by ERSPAN */
+ u32 index; /* ERSPAN type II index */
+
struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
@@ -151,8 +154,10 @@ struct ip_tunnel {
#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
-#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
+#define TUNNEL_OPTIONS_PRESENT \
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
struct tnl_ptk_info {
__be16 flags;
diff --git a/include/net/irda/af_irda.h b/include/net/irda/af_irda.h
deleted file mode 100644
index 0df574931522..000000000000
--- a/include/net/irda/af_irda.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*********************************************************************
- *
- * Filename: af_irda.h
- * Version: 1.0
- * Description: IrDA sockets declarations
- * Status: Stable
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Dec 9 21:13:12 1997
- * Modified at: Fri Jan 28 13:16:32 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef AF_IRDA_H
-#define AF_IRDA_H
-
-#include <linux/irda.h>
-#include <net/irda/irda.h>
-#include <net/irda/iriap.h> /* struct iriap_cb */
-#include <net/irda/irias_object.h> /* struct ias_value */
-#include <net/irda/irlmp.h> /* struct lsap_cb */
-#include <net/irda/irttp.h> /* struct tsap_cb */
-#include <net/irda/discovery.h> /* struct discovery_t */
-#include <net/sock.h>
-
-/* IrDA Socket */
-struct irda_sock {
- /* struct sock has to be the first member of irda_sock */
- struct sock sk;
- __u32 saddr; /* my local address */
- __u32 daddr; /* peer address */
-
- struct lsap_cb *lsap; /* LSAP used by Ultra */
- __u8 pid; /* Protocol IP (PID) used by Ultra */
-
- struct tsap_cb *tsap; /* TSAP used by this connection */
- __u8 dtsap_sel; /* remote TSAP address */
- __u8 stsap_sel; /* local TSAP address */
-
- __u32 max_sdu_size_rx;
- __u32 max_sdu_size_tx;
- __u32 max_data_size;
- __u8 max_header_size;
- struct qos_info qos_tx;
-
- __u16_host_order mask; /* Hint bits mask */
- __u16_host_order hints; /* Hint bits */
-
- void *ckey; /* IrLMP client handle */
- void *skey; /* IrLMP service handle */
-
- struct ias_object *ias_obj; /* Our service name + lsap in IAS */
- struct iriap_cb *iriap; /* Used to query remote IAS */
- struct ias_value *ias_result; /* Result of remote IAS query */
-
- hashbin_t *cachelog; /* Result of discovery query */
- __u32 cachedaddr; /* Result of selective discovery query */
-
- int nslots; /* Number of slots to use for discovery */
-
- int errno; /* status of the IAS query */
-
- wait_queue_head_t query_wait; /* Wait for the answer to a query */
- struct timer_list watchdog; /* Timeout for discovery */
-
- LOCAL_FLOW tx_flow;
- LOCAL_FLOW rx_flow;
-};
-
-static inline struct irda_sock *irda_sk(struct sock *sk)
-{
- return (struct irda_sock *)sk;
-}
-
-#endif /* AF_IRDA_H */
diff --git a/include/net/irda/crc.h b/include/net/irda/crc.h
deleted file mode 100644
index f202296df9bb..000000000000
--- a/include/net/irda/crc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- *
- * Filename: crc.h
- * Version:
- * Description: CRC routines
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sun May 2 20:25:23 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- ********************************************************************/
-
-#ifndef IRDA_CRC_H
-#define IRDA_CRC_H
-
-#include <linux/types.h>
-#include <linux/crc-ccitt.h>
-
-#define INIT_FCS 0xffff /* Initial FCS value */
-#define GOOD_FCS 0xf0b8 /* Good final FCS value */
-
-/* Recompute the FCS with one more character appended. */
-#define irda_fcs(fcs, c) crc_ccitt_byte(fcs, c)
-
-/* Recompute the FCS with len bytes appended. */
-#define irda_calc_crc16(fcs, buf, len) crc_ccitt(fcs, buf, len)
-
-#endif
diff --git a/include/net/irda/discovery.h b/include/net/irda/discovery.h
deleted file mode 100644
index 63ae32530567..000000000000
--- a/include/net/irda/discovery.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*********************************************************************
- *
- * Filename: discovery.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Apr 6 16:53:53 1999
- * Modified at: Tue Oct 5 10:05:10 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef DISCOVERY_H
-#define DISCOVERY_H
-
-#include <asm/param.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/irqueue.h> /* irda_queue_t */
-#include <net/irda/irlap_event.h> /* LAP_REASON */
-
-#define DISCOVERY_EXPIRE_TIMEOUT (2*sysctl_discovery_timeout*HZ)
-#define DISCOVERY_DEFAULT_SLOTS 0
-
-/*
- * This type is used by the protocols that transmit 16 bits words in
- * little endian format. A little endian machine stores MSB of word in
- * byte[1] and LSB in byte[0]. A big endian machine stores MSB in byte[0]
- * and LSB in byte[1].
- *
- * This structure is used in the code for things that are endian neutral
- * but that fit in a word so that we can manipulate them efficiently.
- * By endian neutral, I mean things that are really an array of bytes,
- * and always used as such, for example the hint bits. Jean II
- */
-typedef union {
- __u16 word;
- __u8 byte[2];
-} __u16_host_order;
-
-/* Types of discovery */
-typedef enum {
- DISCOVERY_LOG, /* What's in our discovery log */
- DISCOVERY_ACTIVE, /* Doing our own discovery on the medium */
- DISCOVERY_PASSIVE, /* Peer doing discovery on the medium */
- EXPIRY_TIMEOUT, /* Entry expired due to timeout */
-} DISCOVERY_MODE;
-
-#define NICKNAME_MAX_LEN 21
-
-/* Basic discovery information about a peer */
-typedef struct irda_device_info discinfo_t; /* linux/irda.h */
-
-/*
- * The DISCOVERY structure is used for both discovery requests and responses
- */
-typedef struct discovery_t {
- irda_queue_t q; /* Must be first! */
-
- discinfo_t data; /* Basic discovery information */
- int name_len; /* Length of nickname */
-
- LAP_REASON condition; /* More info about the discovery */
- int gen_addr_bit; /* Need to generate a new device
- * address? */
- int nslots; /* Number of slots to use when
- * discovering */
- unsigned long timestamp; /* Last time discovered */
- unsigned long firststamp; /* First time discovered */
-} discovery_t;
-
-void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *discovery);
-void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log);
-void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force);
-struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
- __u16 mask, int old_entries);
-
-#endif
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
deleted file mode 100644
index 2a580ce9edad..000000000000
--- a/include/net/irda/ircomm_core.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_core.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Wed Jun 9 08:58:43 1999
- * Modified at: Mon Dec 13 11:52:29 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_CORE_H
-#define IRCOMM_CORE_H
-
-#include <net/irda/irda.h>
-#include <net/irda/irqueue.h>
-#include <net/irda/ircomm_event.h>
-
-#define IRCOMM_MAGIC 0x98347298
-#define IRCOMM_HEADER_SIZE 1
-
-struct ircomm_cb; /* Forward decl. */
-
-/*
- * A small call-table, so we don't have to check the service-type whenever
- * we want to do something
- */
-typedef struct {
- int (*data_request)(struct ircomm_cb *, struct sk_buff *, int clen);
- int (*connect_request)(struct ircomm_cb *, struct sk_buff *,
- struct ircomm_info *);
- int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
- int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
- struct ircomm_info *);
-} call_t;
-
-struct ircomm_cb {
- irda_queue_t queue;
- magic_t magic;
-
- notify_t notify;
- call_t issue;
-
- int state;
- int line; /* Which TTY line we are using */
-
- struct tsap_cb *tsap;
- struct lsap_cb *lsap;
-
- __u8 dlsap_sel; /* Destination LSAP/TSAP selector */
- __u8 slsap_sel; /* Source LSAP/TSAP selector */
-
- __u32 saddr; /* Source device address (link we are using) */
- __u32 daddr; /* Destination device address */
-
- int max_header_size; /* Header space we must reserve for each frame */
- int max_data_size; /* The amount of data we can fill in each frame */
-
- LOCAL_FLOW flow_status; /* Used by ircomm_lmp */
- int pkt_count; /* Number of frames we have sent to IrLAP */
-
- __u8 service_type;
-};
-
-extern hashbin_t *ircomm;
-
-struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line);
-int ircomm_close(struct ircomm_cb *self);
-
-int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb);
-void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb);
-void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb);
-int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb);
-int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel,
- __u32 saddr, __u32 daddr, struct sk_buff *skb,
- __u8 service_type);
-void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
- struct ircomm_info *info);
-void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb,
- struct ircomm_info *info);
-int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata);
-int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata);
-void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb,
- struct ircomm_info *info);
-void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow);
-
-#define ircomm_is_connected(self) (self->state == IRCOMM_CONN)
-
-#endif
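
The call_t table above is filled in once, when the control block is opened for a given service type, so the hot path calls through `self->issue` instead of branching on `service_type` for every operation. A freestanding sketch of that dispatch-table pattern (the handler names and simplified signatures are hypothetical, not the removed IrCOMM functions):

```c
#include <stdio.h>

struct conn;	/* forward declaration, like struct ircomm_cb */

/* Per-instance call table: one function pointer per operation. */
struct ops {
	int (*data_request)(struct conn *self, const char *buf, int len);
};

struct conn {
	struct ops issue;	/* filled in once, at open time */
};

/* Two alternative transports, chosen when the connection is opened. */
static int ttp_data_request(struct conn *self, const char *buf, int len)
{
	(void)self;
	printf("TTP: sending %d bytes: %.*s\n", len, len, buf);
	return 0;
}

static int lmp_data_request(struct conn *self, const char *buf, int len)
{
	(void)self;
	printf("LMP: sending %d bytes: %.*s\n", len, len, buf);
	return 0;
}

int main(void)
{
	struct conn reliable   = { .issue = { .data_request = ttp_data_request } };
	struct conn unreliable = { .issue = { .data_request = lmp_data_request } };

	/* The caller never checks the service type again; it just calls
	 * through the table, the way ircomm_data_request() would. */
	reliable.issue.data_request(&reliable, "hello", 5);
	unreliable.issue.data_request(&unreliable, "world", 5);
	return 0;
}
```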
diff --git a/include/net/irda/ircomm_event.h b/include/net/irda/ircomm_event.h
deleted file mode 100644
index 5bbc32998d57..000000000000
--- a/include/net/irda/ircomm_event.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_event.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Jun 6 23:51:13 1999
- * Modified at: Thu Jun 10 08:36:25 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_EVENT_H
-#define IRCOMM_EVENT_H
-
-#include <net/irda/irmod.h>
-
-typedef enum {
- IRCOMM_IDLE,
- IRCOMM_WAITI,
- IRCOMM_WAITR,
- IRCOMM_CONN,
-} IRCOMM_STATE;
-
-/* IrCOMM Events */
-typedef enum {
- IRCOMM_CONNECT_REQUEST,
- IRCOMM_CONNECT_RESPONSE,
- IRCOMM_TTP_CONNECT_INDICATION,
- IRCOMM_LMP_CONNECT_INDICATION,
- IRCOMM_TTP_CONNECT_CONFIRM,
- IRCOMM_LMP_CONNECT_CONFIRM,
-
- IRCOMM_LMP_DISCONNECT_INDICATION,
- IRCOMM_TTP_DISCONNECT_INDICATION,
- IRCOMM_DISCONNECT_REQUEST,
-
- IRCOMM_TTP_DATA_INDICATION,
- IRCOMM_LMP_DATA_INDICATION,
- IRCOMM_DATA_REQUEST,
- IRCOMM_CONTROL_REQUEST,
- IRCOMM_CONTROL_INDICATION,
-} IRCOMM_EVENT;
-
-/*
- * Used for passing information through the state-machine
- */
-struct ircomm_info {
- __u32 saddr; /* Source device address */
- __u32 daddr; /* Destination device address */
- __u8 dlsap_sel;
- LM_REASON reason; /* Reason for disconnect */
- __u32 max_data_size;
- __u32 max_header_size;
-
- struct qos_info *qos;
-};
-
-extern const char *const ircomm_state[];
-
-struct ircomm_cb; /* Forward decl. */
-
-int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb, struct ircomm_info *info);
-void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state);
-
-#endif
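
ircomm_do_event() pairs the IRCOMM_STATE and IRCOMM_EVENT enums in a table-driven state machine: the current state selects a handler, the event selects the action, and ircomm_next_state() records the transition. A compact sketch of that shape with two states and two events (all names hypothetical):

```c
#include <stdio.h>

enum state { ST_IDLE, ST_CONN, ST_MAX };
enum event { EV_CONNECT, EV_DISCONNECT, EV_MAX };

struct machine { enum state state; };

typedef int (*handler_t)(struct machine *m, enum event ev);

static int idle_handler(struct machine *m, enum event ev)
{
	if (ev == EV_CONNECT) {
		m->state = ST_CONN;	/* like ircomm_next_state() */
		return 0;
	}
	return -1;			/* event ignored in this state */
}

static int conn_handler(struct machine *m, enum event ev)
{
	if (ev == EV_DISCONNECT) {
		m->state = ST_IDLE;
		return 0;
	}
	return -1;
}

/* One handler per state, indexed by the current state. */
static const handler_t handlers[ST_MAX] = {
	[ST_IDLE] = idle_handler,
	[ST_CONN] = conn_handler,
};

static int do_event(struct machine *m, enum event ev)
{
	return handlers[m->state](m, ev);	/* like ircomm_do_event() */
}

int main(void)
{
	struct machine m = { .state = ST_IDLE };

	do_event(&m, EV_CONNECT);
	printf("state after connect: %d\n", m.state);	/* 1 == ST_CONN */
	do_event(&m, EV_DISCONNECT);
	printf("state after disconnect: %d\n", m.state);	/* 0 == ST_IDLE */
	return 0;
}
```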
diff --git a/include/net/irda/ircomm_lmp.h b/include/net/irda/ircomm_lmp.h
deleted file mode 100644
index 5042a5021a04..000000000000
--- a/include/net/irda/ircomm_lmp.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_lmp.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Wed Jun 9 10:06:07 1999
- * Modified at: Fri Aug 13 07:32:32 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_LMP_H
-#define IRCOMM_LMP_H
-
-#include <net/irda/ircomm_core.h>
-
-int ircomm_open_lsap(struct ircomm_cb *self);
-
-#endif
diff --git a/include/net/irda/ircomm_param.h b/include/net/irda/ircomm_param.h
deleted file mode 100644
index 1f67432321c4..000000000000
--- a/include/net/irda/ircomm_param.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_param.h
- * Version: 1.0
- * Description: Parameter handling for the IrCOMM protocol
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Jun 7 08:47:28 1999
- * Modified at: Wed Aug 25 13:46:33 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_PARAMS_H
-#define IRCOMM_PARAMS_H
-
-#include <net/irda/parameters.h>
-
-/* Parameters common to all service types */
-#define IRCOMM_SERVICE_TYPE 0x00
-#define IRCOMM_PORT_TYPE 0x01 /* Only used in LM-IAS */
-#define IRCOMM_PORT_NAME 0x02 /* Only used in LM-IAS */
-
-/* Parameters for both 3 wire and 9 wire */
-#define IRCOMM_DATA_RATE 0x10
-#define IRCOMM_DATA_FORMAT 0x11
-#define IRCOMM_FLOW_CONTROL 0x12
-#define IRCOMM_XON_XOFF 0x13
-#define IRCOMM_ENQ_ACK 0x14
-#define IRCOMM_LINE_STATUS 0x15
-#define IRCOMM_BREAK 0x16
-
-/* Parameters for 9 wire */
-#define IRCOMM_DTE 0x20
-#define IRCOMM_DCE 0x21
-#define IRCOMM_POLL 0x22
-
-/* Service type (details) */
-#define IRCOMM_3_WIRE_RAW 0x01
-#define IRCOMM_3_WIRE 0x02
-#define IRCOMM_9_WIRE 0x04
-#define IRCOMM_CENTRONICS 0x08
-
-/* Port type (details) */
-#define IRCOMM_SERIAL 0x00
-#define IRCOMM_PARALLEL 0x01
-
-/* Data format (details) */
-#define IRCOMM_WSIZE_5 0x00
-#define IRCOMM_WSIZE_6 0x01
-#define IRCOMM_WSIZE_7 0x02
-#define IRCOMM_WSIZE_8 0x03
-
-#define IRCOMM_1_STOP_BIT 0x00
-#define IRCOMM_2_STOP_BIT 0x04 /* 1.5 if char len 5 */
-
-#define IRCOMM_PARITY_DISABLE 0x00
-#define IRCOMM_PARITY_ENABLE 0x08
-
-#define IRCOMM_PARITY_ODD 0x00
-#define IRCOMM_PARITY_EVEN 0x10
-#define IRCOMM_PARITY_MARK 0x20
-#define IRCOMM_PARITY_SPACE 0x30
-
-/* Flow control */
-#define IRCOMM_XON_XOFF_IN 0x01
-#define IRCOMM_XON_XOFF_OUT 0x02
-#define IRCOMM_RTS_CTS_IN 0x04
-#define IRCOMM_RTS_CTS_OUT 0x08
-#define IRCOMM_DSR_DTR_IN 0x10
-#define IRCOMM_DSR_DTR_OUT 0x20
-#define IRCOMM_ENQ_ACK_IN 0x40
-#define IRCOMM_ENQ_ACK_OUT 0x80
-
-/* Line status */
-#define IRCOMM_OVERRUN_ERROR 0x02
-#define IRCOMM_PARITY_ERROR 0x04
-#define IRCOMM_FRAMING_ERROR 0x08
-
-/* DTE (Data terminal equipment) line settings */
-#define IRCOMM_DELTA_DTR 0x01
-#define IRCOMM_DELTA_RTS 0x02
-#define IRCOMM_DTR 0x04
-#define IRCOMM_RTS 0x08
-
-/* DCE (Data communications equipment) line settings */
-#define IRCOMM_DELTA_CTS 0x01 /* Clear to send has changed */
-#define IRCOMM_DELTA_DSR 0x02 /* Data set ready has changed */
-#define IRCOMM_DELTA_RI 0x04 /* Ring indicator has changed */
-#define IRCOMM_DELTA_CD 0x08 /* Carrier detect has changed */
-#define IRCOMM_CTS 0x10 /* Clear to send is high */
-#define IRCOMM_DSR 0x20 /* Data set ready is high */
-#define IRCOMM_RI 0x40 /* Ring indicator is high */
-#define IRCOMM_CD 0x80 /* Carrier detect is high */
-#define IRCOMM_DCE_DELTA_ANY 0x0f
-
-/*
- * Parameter state
- */
-struct ircomm_params {
- /* General control params */
- __u8 service_type;
- __u8 port_type;
- char port_name[32];
-
- /* Control params for 3- and 9-wire service type */
- __u32 data_rate; /* Data rate in bps */
- __u8 data_format;
- __u8 flow_control;
- char xonxoff[2];
- char enqack[2];
- __u8 line_status;
- __u8 _break;
-
- __u8 null_modem;
-
- /* Control params for 9-wire service type */
- __u8 dte;
- __u8 dce;
- __u8 poll;
-
- /* Control params for Centronics service type */
-};
-
-struct ircomm_tty_cb; /* Forward decl. */
-
-int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush);
-
-extern pi_param_info_t ircomm_param_info;
-
-#endif /* IRCOMM_PARAMS_H */
-
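
The data-format constants above pack word size (bits 0-1), stop bits (bit 2) and parity (bits 3-5) into one byte, much like a UART line-control register. A quick sketch composing and decoding the common 8N1 setting; the constants are copied from the header, while the decode logic is an interpretation of those masks:

```c
#include <stdint.h>
#include <stdio.h>

/* Values copied from the removed ircomm_param.h */
#define IRCOMM_WSIZE_8		0x03
#define IRCOMM_1_STOP_BIT	0x00
#define IRCOMM_2_STOP_BIT	0x04
#define IRCOMM_PARITY_DISABLE	0x00
#define IRCOMM_PARITY_ENABLE	0x08

int main(void)
{
	/* 8 data bits, no parity, 1 stop bit ("8N1") */
	uint8_t fmt = IRCOMM_WSIZE_8 | IRCOMM_1_STOP_BIT | IRCOMM_PARITY_DISABLE;

	int data_bits = 5 + (fmt & 0x03);			/* word-size field */
	int stop_bits = (fmt & IRCOMM_2_STOP_BIT) ? 2 : 1;
	int parity_on = (fmt & IRCOMM_PARITY_ENABLE) != 0;

	printf("data format 0x%02x => %d%c%d\n",
	       fmt, data_bits, parity_on ? 'P' : 'N', stop_bits);
	return 0;
}
```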
diff --git a/include/net/irda/ircomm_ttp.h b/include/net/irda/ircomm_ttp.h
deleted file mode 100644
index c5627288bca3..000000000000
--- a/include/net/irda/ircomm_ttp.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_ttp.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Wed Jun 9 10:06:07 1999
- * Modified at: Fri Aug 13 07:32:22 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_TTP_H
-#define IRCOMM_TTP_H
-
-#include <net/irda/ircomm_core.h>
-
-int ircomm_open_tsap(struct ircomm_cb *self);
-
-#endif
-
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
deleted file mode 100644
index 8d4f588974bc..000000000000
--- a/include/net/irda/ircomm_tty.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_tty.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Jun 6 23:24:22 1999
- * Modified at: Fri Jan 28 13:16:57 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_TTY_H
-#define IRCOMM_TTY_H
-
-#include <linux/serial.h>
-#include <linux/termios.h>
-#include <linux/timer.h>
-#include <linux/tty.h> /* struct tty_struct */
-
-#include <net/irda/irias_object.h>
-#include <net/irda/ircomm_core.h>
-#include <net/irda/ircomm_param.h>
-
-#define IRCOMM_TTY_PORTS 32
-#define IRCOMM_TTY_MAGIC 0x3432
-#define IRCOMM_TTY_MAJOR 161
-#define IRCOMM_TTY_MINOR 0
-
-/* This is used as an initial value for max_header_size before the proper
- * value is filled in (5 for ttp, 4 for lmp). This allows us to detect
- * the state of the underlying connection. - Jean II */
-#define IRCOMM_TTY_HDR_UNINITIALISED 16
-/* Same for payload size. See qos.c for the smallest max data size */
-#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
-
-/*
- * IrCOMM TTY driver state
- */
-struct ircomm_tty_cb {
- irda_queue_t queue; /* Must be first */
- struct tty_port port;
- magic_t magic;
-
- int state; /* Connect state */
-
- struct ircomm_cb *ircomm; /* IrCOMM layer instance */
-
- struct sk_buff *tx_skb; /* Transmit buffer */
- struct sk_buff *ctrl_skb; /* Control data buffer */
-
- /* Parameters */
- struct ircomm_params settings;
-
- __u8 service_type; /* The service that we support */
- int client; /* True if we are a client */
- LOCAL_FLOW flow; /* IrTTP flow status */
-
- int line;
-
- __u8 dlsap_sel;
- __u8 slsap_sel;
-
- __u32 saddr;
- __u32 daddr;
-
- __u32 max_data_size; /* Max data we can transmit in one packet */
- __u32 max_header_size; /* The amount of header space we must reserve */
- __u32 tx_data_size; /* Max data size of current tx_skb */
-
- struct iriap_cb *iriap; /* Instance used for querying remote IAS */
- struct ias_object* obj;
- void *skey;
- void *ckey;
-
- struct timer_list watchdog_timer;
- struct work_struct tqueue;
-
-	/* Protect concurrent access to:
-	 *	o self->ctrl_skb
-	 *	o self->tx_skb
-	 * Other things may benefit from being protected as well...
-	 * Jean II */
- spinlock_t spinlock;
-};
-
-void ircomm_tty_start(struct tty_struct *tty);
-void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
-
-int ircomm_tty_tiocmget(struct tty_struct *tty);
-int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
- unsigned int clear);
-int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg);
-void ircomm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios);
-
-#endif
-
-
-
-
-
-
-
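
IRCOMM_TTY_HDR_UNINITIALISED relies on a sentinel: 16 is larger than any real header (5 for TTP, 4 for LMP), so max_header_size still holding that value means the connection parameters have not been learned yet. A minimal sketch of that sentinel pattern (field and helper names hypothetical):

```c
#include <stdio.h>

/* 16 is larger than any real header (5 for TTP, 4 for LMP) */
#define HDR_UNINITIALISED 16

struct port {
	unsigned int max_header_size;
};

static int link_is_up(const struct port *p)
{
	/* A real connect would have overwritten the sentinel value. */
	return p->max_header_size != HDR_UNINITIALISED;
}

int main(void)
{
	struct port p = { .max_header_size = HDR_UNINITIALISED };

	printf("before connect: link up? %d\n", link_is_up(&p));	/* 0 */
	p.max_header_size = 5;	/* e.g. TTP header size learned at connect */
	printf("after connect:  link up? %d\n", link_is_up(&p));	/* 1 */
	return 0;
}
```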
diff --git a/include/net/irda/ircomm_tty_attach.h b/include/net/irda/ircomm_tty_attach.h
deleted file mode 100644
index 20dcbdf258cf..000000000000
--- a/include/net/irda/ircomm_tty_attach.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*********************************************************************
- *
- * Filename: ircomm_tty_attach.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Wed Jun 9 15:55:18 1999
- * Modified at: Fri Dec 10 21:04:55 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRCOMM_TTY_ATTACH_H
-#define IRCOMM_TTY_ATTACH_H
-
-#include <net/irda/ircomm_tty.h>
-
-typedef enum {
- IRCOMM_TTY_IDLE,
- IRCOMM_TTY_SEARCH,
- IRCOMM_TTY_QUERY_PARAMETERS,
- IRCOMM_TTY_QUERY_LSAP_SEL,
- IRCOMM_TTY_SETUP,
- IRCOMM_TTY_READY,
-} IRCOMM_TTY_STATE;
-
-/* IrCOMM TTY Events */
-typedef enum {
- IRCOMM_TTY_ATTACH_CABLE,
- IRCOMM_TTY_DETACH_CABLE,
- IRCOMM_TTY_DATA_REQUEST,
- IRCOMM_TTY_DATA_INDICATION,
- IRCOMM_TTY_DISCOVERY_REQUEST,
- IRCOMM_TTY_DISCOVERY_INDICATION,
- IRCOMM_TTY_CONNECT_CONFIRM,
- IRCOMM_TTY_CONNECT_INDICATION,
- IRCOMM_TTY_DISCONNECT_REQUEST,
- IRCOMM_TTY_DISCONNECT_INDICATION,
- IRCOMM_TTY_WD_TIMER_EXPIRED,
- IRCOMM_TTY_GOT_PARAMETERS,
- IRCOMM_TTY_GOT_LSAPSEL,
-} IRCOMM_TTY_EVENT;
-
-/* Used for passing information through the state-machine */
-struct ircomm_tty_info {
- __u32 saddr; /* Source device address */
- __u32 daddr; /* Destination device address */
- __u8 dlsap_sel;
-};
-
-extern const char *const ircomm_state[];
-extern const char *const ircomm_tty_state[];
-
-int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event,
- struct sk_buff *skb, struct ircomm_tty_info *info);
-
-
-int ircomm_tty_attach_cable(struct ircomm_tty_cb *self);
-void ircomm_tty_detach_cable(struct ircomm_tty_cb *self);
-void ircomm_tty_connect_confirm(void *instance, void *sap,
- struct qos_info *qos,
- __u32 max_sdu_size,
- __u8 max_header_size,
- struct sk_buff *skb);
-void ircomm_tty_disconnect_indication(void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *skb);
-void ircomm_tty_connect_indication(void *instance, void *sap,
- struct qos_info *qos,
- __u32 max_sdu_size,
- __u8 max_header_size,
- struct sk_buff *skb);
-int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self);
-void ircomm_tty_link_established(struct ircomm_tty_cb *self);
-
-#endif /* IRCOMM_TTY_ATTACH_H */
diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h
deleted file mode 100644
index 92c8fb575213..000000000000
--- a/include/net/irda/irda.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*********************************************************************
- *
- * Filename: irda.h
- * Version: 1.0
- * Description: IrDA common include file for kernel internal use
- * Status: Stable
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Dec 9 21:13:12 1997
- * Modified at: Fri Jan 28 13:16:32 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef NET_IRDA_H
-#define NET_IRDA_H
-
-#include <linux/skbuff.h> /* struct sk_buff */
-#include <linux/kernel.h>
-#include <linux/if.h> /* sa_family_t in <linux/irda.h> */
-#include <linux/irda.h>
-
-typedef __u32 magic_t;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/* Hack to do small backoff when setting media busy in IrLAP */
-#ifndef SMALL
-#define SMALL 5
-#endif
-
-#ifndef IRDA_MIN /* Lets not mix this MIN with other header files */
-#define IRDA_MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-
-#ifndef IRDA_ALIGN
-# define IRDA_ALIGN __attribute__((aligned))
-#endif
-
-#ifdef CONFIG_IRDA_DEBUG
-#define IRDA_ASSERT(expr, func) \
-do { if(!(expr)) { \
- printk( "Assertion failed! %s:%s:%d %s\n", \
- __FILE__,__func__,__LINE__,(#expr) ); \
- func } } while (0)
-#define IRDA_ASSERT_LABEL(label) label
-#else
-#define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0)
-#define IRDA_ASSERT_LABEL(label)
-#endif /* CONFIG_IRDA_DEBUG */
-
-/*
- * Magic numbers used by Linux-IrDA. Random numbers which must be unique to
- * give the best protection
- */
-
-#define IRTTY_MAGIC 0x2357
-#define LAP_MAGIC 0x1357
-#define LMP_MAGIC 0x4321
-#define LMP_LSAP_MAGIC 0x69333
-#define LMP_LAP_MAGIC 0x3432
-#define IRDA_DEVICE_MAGIC 0x63454
-#define IAS_MAGIC 0x007
-#define TTP_MAGIC 0x241169
-#define TTP_TSAP_MAGIC 0x4345
-#define IROBEX_MAGIC 0x341324
-#define HB_MAGIC 0x64534
-#define IRLAN_MAGIC 0x754
-#define IAS_OBJECT_MAGIC 0x34234
-#define IAS_ATTRIB_MAGIC 0x45232
-#define IRDA_TASK_MAGIC 0x38423
-
-#define IAS_DEVICE_ID 0x0000 /* Defined by IrDA, IrLMP section 4.1 (page 68) */
-#define IAS_PNP_ID 0xd342
-#define IAS_OBEX_ID 0x34323
-#define IAS_IRLAN_ID 0x34234
-#define IAS_IRCOMM_ID 0x2343
-#define IAS_IRLPT_ID 0x9876
-
-struct net_device;
-struct packet_type;
-
-void irda_proc_register(void);
-void irda_proc_unregister(void);
-
-int irda_sysctl_register(void);
-void irda_sysctl_unregister(void);
-
-int irsock_init(void);
-void irsock_cleanup(void);
-
-int irda_nl_register(void);
-void irda_nl_unregister(void);
-
-int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev);
-
-#endif /* NET_IRDA_H */
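
IRDA_ASSERT takes the recovery action as its second argument, prints file/function/line when the check fails, and compiles down to a bare evaluation of the expression when CONFIG_IRDA_DEBUG is off. A standalone sketch of the same macro shape and how it is typically invoked (MY_ASSERT and half() are illustrative, not kernel code):

```c
#include <stdio.h>

#define MY_DEBUG 1

#if MY_DEBUG
#define MY_ASSERT(expr, func) \
do { if (!(expr)) { \
	printf("Assertion failed! %s:%s:%d %s\n", \
	       __FILE__, __func__, __LINE__, #expr); \
	func } } while (0)
#else
#define MY_ASSERT(expr, func) do { (void)(expr); } while (0)
#endif

/* The recovery action is passed as the second argument, IRDA_ASSERT style. */
static int half(int x)
{
	MY_ASSERT((x % 2) == 0, return -1;);
	return x / 2;
}

int main(void)
{
	printf("half(8) = %d\n", half(8));	/* 4 */
	printf("half(7) = %d\n", half(7));	/* prints the assertion, returns -1 */
	return 0;
}
```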
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
deleted file mode 100644
index 664bf8178412..000000000000
--- a/include/net/irda/irda_device.h
+++ /dev/null
@@ -1,285 +0,0 @@
-/*********************************************************************
- *
- * Filename: irda_device.h
- * Version: 0.9
- * Description: Contains various declarations used by the drivers
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Apr 14 12:41:42 1998
- * Modified at: Mon Mar 20 09:08:57 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
- * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-/*
- * This header contains all the IrDA definitions a driver really
- * needs, and therefore the driver should not need to include
- * any other IrDA headers - Jean II
- */
-
-#ifndef IRDA_DEVICE_H
-#define IRDA_DEVICE_H
-
-#include <linux/tty.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h> /* struct sk_buff */
-#include <linux/irda.h>
-#include <linux/types.h>
-
-#include <net/pkt_sched.h>
-#include <net/irda/irda.h>
-#include <net/irda/qos.h> /* struct qos_info */
-#include <net/irda/irqueue.h> /* irda_queue_t */
-
-/* A few forward declarations (to make compiler happy) */
-struct irlap_cb;
-
-/* Some non-standard interface flags (should not conflict with any in if.h) */
-#define IFF_SIR 0x0001 /* Supports SIR speeds */
-#define IFF_MIR 0x0002 /* Supports MIR speeds */
-#define IFF_FIR 0x0004 /* Supports FIR speeds */
-#define IFF_VFIR 0x0008 /* Supports VFIR speeds */
-#define IFF_PIO 0x0010 /* Supports PIO transfer of data */
-#define IFF_DMA 0x0020 /* Supports DMA transfer of data */
-#define IFF_SHM 0x0040 /* Supports shared memory data transfers */
-#define IFF_DONGLE 0x0080 /* Interface has a dongle attached */
-#define IFF_AIR 0x0100 /* Supports Advanced IR (AIR) standards */
-
-#define IO_XMIT 0x01
-#define IO_RECV 0x02
-
-typedef enum {
- IRDA_IRLAP, /* IrDA mode, and deliver to IrLAP */
- IRDA_RAW, /* IrDA mode */
- SHARP_ASK,
- TV_REMOTE, /* Also known as Consumer Electronics IR */
-} INFRARED_MODE;
-
-typedef enum {
- IRDA_TASK_INIT, /* All tasks are initialized with this state */
- IRDA_TASK_DONE, /* Signals that the task is finished */
- IRDA_TASK_WAIT,
- IRDA_TASK_WAIT1,
- IRDA_TASK_WAIT2,
- IRDA_TASK_WAIT3,
- IRDA_TASK_CHILD_INIT, /* Initializing child task */
- IRDA_TASK_CHILD_WAIT, /* Waiting for child task to finish */
- IRDA_TASK_CHILD_DONE /* Child task is finished */
-} IRDA_TASK_STATE;
-
-struct irda_task;
-typedef int (*IRDA_TASK_CALLBACK) (struct irda_task *task);
-
-struct irda_task {
- irda_queue_t q;
- magic_t magic;
-
- IRDA_TASK_STATE state;
- IRDA_TASK_CALLBACK function;
- IRDA_TASK_CALLBACK finished;
-
- struct irda_task *parent;
- struct timer_list timer;
-
- void *instance; /* Instance being called */
- void *param; /* Parameter to be used by instance */
-};
-
-/* Dongle info */
-struct dongle_reg;
-typedef struct {
- struct dongle_reg *issue; /* Registration info */
- struct net_device *dev; /* Device we are attached to */
- struct irda_task *speed_task; /* Task handling speed change */
- struct irda_task *reset_task; /* Task handling reset */
- __u32 speed; /* Current speed */
-
- /* Callbacks to the IrDA device driver */
- int (*set_mode)(struct net_device *, int mode);
- int (*read)(struct net_device *dev, __u8 *buf, int len);
- int (*write)(struct net_device *dev, __u8 *buf, int len);
- int (*set_dtr_rts)(struct net_device *dev, int dtr, int rts);
-} dongle_t;
-
-/* Dongle registration info */
-struct dongle_reg {
- irda_queue_t q; /* Must be first */
- IRDA_DONGLE type;
-
- void (*open)(dongle_t *dongle, struct qos_info *qos);
- void (*close)(dongle_t *dongle);
- int (*reset)(struct irda_task *task);
- int (*change_speed)(struct irda_task *task);
- struct module *owner;
-};
-
-/*
- * Per-packet information we need to hide inside sk_buff
- * (must not exceed 48 bytes, check with struct sk_buff)
- * The default_qdisc_pad field is a temporary hack.
- */
-struct irda_skb_cb {
- unsigned int default_qdisc_pad;
- magic_t magic; /* Be sure that we can trust the information */
- __u32 next_speed; /* The Speed to be set *after* this frame */
- __u16 mtt; /* Minimum turn around time */
- __u16 xbofs; /* Number of xbofs required, used by SIR mode */
- __u16 next_xbofs; /* Number of xbofs required *after* this frame */
- void *context; /* May be used by drivers */
- void (*destructor)(struct sk_buff *skb); /* Used for flow control */
- __u16 xbofs_delay; /* Number of xbofs used for generating the mtt */
- __u8 line; /* Used by IrCOMM in IrLPT mode */
-};
-
-/* Chip specific info */
-typedef struct {
- int cfg_base; /* Config register IO base */
- int sir_base; /* SIR IO base */
- int fir_base; /* FIR IO base */
- int mem_base; /* Shared memory base */
- int sir_ext; /* Length of SIR iobase */
- int fir_ext; /* Length of FIR iobase */
- int irq, irq2; /* Interrupts used */
- int dma, dma2; /* DMA channel(s) used */
- int fifo_size; /* FIFO size */
- int irqflags; /* interrupt flags (ie, IRQF_SHARED) */
- int direction; /* Link direction, used by some FIR drivers */
- int enabled; /* Powered on? */
- int suspended; /* Suspended by APM */
- __u32 speed; /* Currently used speed */
- __u32 new_speed; /* Speed we must change to when Tx is finished */
- int dongle_id; /* Dongle or transceiver currently used */
-} chipio_t;
-
-/* IO buffer specific info (inspired by struct sk_buff) */
-typedef struct {
- int state; /* Receiving state (transmit state not used) */
- int in_frame; /* True if receiving frame */
-
- __u8 *head; /* start of buffer */
- __u8 *data; /* start of data in buffer */
-
- int len; /* current length of data */
- int truesize; /* total allocated size of buffer */
- __u16 fcs;
-
- struct sk_buff *skb; /* ZeroCopy Rx in async_unwrap_char() */
-} iobuff_t;
-
-/* Maximum SIR frame (skb) that we expect to receive *unwrapped*.
- * Max LAP MTU (I field) is 2048 bytes max (IrLAP 1.1, chapt 6.6.5, p40).
- * Max LAP header is 2 bytes (for now).
- * Max CRC is 2 bytes at SIR, 4 bytes at FIR.
- * Need 1 byte for skb_reserve() to align IP header for IrLAN.
- * Add a few extra bytes just to be safe (buffer is power of two anyway)
- * Jean II */
-#define IRDA_SKB_MAX_MTU 2064
-/* Maximum SIR frame that we expect to send, wrapped (i.e. with XBOFS
- * and escaped characters on top of above). */
-#define IRDA_SIR_MAX_FRAME 4269
-
-/* The SIR unwrapper async_unwrap_char() will use a Rx-copy-break mechanism
- * when using the optional ZeroCopy Rx, where only small frames are memcpy
- * to a smaller skb to save memory. This is the threshold under which copy
- * will happen (and over which it won't happen).
- * Some FIR drivers may use this #define as well...
- * This is the same value as various Ethernet drivers. - Jean II */
-#define IRDA_RX_COPY_THRESHOLD 256
-
-/* Function prototypes */
-int irda_device_init(void);
-void irda_device_cleanup(void);
-
-/* IrLAP entry points used by the drivers.
- * We declare them here to avoid the driver pulling in a whole bunch of
- * stack headers it doesn't really need - Jean II */
-struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
- const char *hw_name);
-void irlap_close(struct irlap_cb *self);
-
-/* Interface to be used by IrLAP */
-void irda_device_set_media_busy(struct net_device *dev, int status);
-int irda_device_is_media_busy(struct net_device *dev);
-int irda_device_is_receiving(struct net_device *dev);
-
-/* Interface for internal use */
-static inline int irda_device_txqueue_empty(const struct net_device *dev)
-{
- return qdisc_all_tx_empty(dev);
-}
-int irda_device_set_raw_mode(struct net_device* self, int status);
-struct net_device *alloc_irdadev(int sizeof_priv);
-
-void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode);
-
-/*
- * Function irda_get_mtt (skb)
- *
- * Utility function for getting the minimum turnaround time out of
- * the skb, where it has been hidden in the cb field.
- */
-static inline __u16 irda_get_mtt(const struct sk_buff *skb)
-{
- const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
- return (cb->magic == LAP_MAGIC) ? cb->mtt : 10000;
-}
-
-/*
- * Function irda_get_next_speed (skb)
- *
- * Extract the speed that should be set *after* this frame from the skb
- *
- * Note : return -1 for user space frames
- */
-static inline __u32 irda_get_next_speed(const struct sk_buff *skb)
-{
- const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
- return (cb->magic == LAP_MAGIC) ? cb->next_speed : -1;
-}
-
-/*
- * Function irda_get_xbofs (skb)
- *
- * Extract the xbofs that should be set for this frame from the skb
- *
- * Note : default to 10 for user space frames
- */
-static inline __u16 irda_get_xbofs(const struct sk_buff *skb)
-{
- const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
- return (cb->magic == LAP_MAGIC) ? cb->xbofs : 10;
-}
-
-/*
- * Function irda_get_next_xbofs (skb)
- *
- * Extract the xbofs that should be set *after* this frame from the skb
- *
- * Note : return -1 for user space frames
- */
-static inline __u16 irda_get_next_xbofs(const struct sk_buff *skb)
-{
- const struct irda_skb_cb *cb = (const struct irda_skb_cb *) skb->cb;
- return (cb->magic == LAP_MAGIC) ? cb->next_xbofs : -1;
-}
-#endif /* IRDA_DEVICE_H */
-
-
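
irda_get_mtt() and its siblings trust the metadata stashed in skb->cb only when the magic field equals LAP_MAGIC; frames coming from user space never had their cb filled in, so the helpers fall back to safe defaults (10000 for mtt, -1 for next_speed, 10 for xbofs). A userspace sketch of that guard-and-default pattern, with a hypothetical packet structure standing in for sk_buff:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LAP_MAGIC 0x1357	/* magic value from the removed irda.h */

/* Stand-in for the 48-byte skb->cb scratch area. */
struct pkt_cb {
	uint32_t magic;		/* proves the cb was filled in by the stack */
	uint16_t mtt;		/* minimum turn-around time, in microseconds */
};

struct pkt {
	char cb[48];
	/* payload would follow in a real packet structure */
};

/* Mirrors irda_get_mtt(): trust the cb only if the magic matches. */
static unsigned int pkt_get_mtt(const struct pkt *p)
{
	const struct pkt_cb *cb = (const struct pkt_cb *)p->cb;

	return (cb->magic == LAP_MAGIC) ? cb->mtt : 10000;	/* safe default */
}

int main(void)
{
	struct pkt from_lap = {{0}}, from_user = {{0}};
	struct pkt_cb cb = { .magic = LAP_MAGIC, .mtt = 500 };

	memcpy(from_lap.cb, &cb, sizeof(cb));	/* stack-originated frame */
	/* from_user.cb stays zeroed: magic check fails, default is used */

	printf("mtt (LAP frame):  %u\n", pkt_get_mtt(&from_lap));	/* 500   */
	printf("mtt (user frame): %u\n", pkt_get_mtt(&from_user));	/* 10000 */
	return 0;
}
```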
diff --git a/include/net/irda/iriap.h b/include/net/irda/iriap.h
deleted file mode 100644
index fcc896491a95..000000000000
--- a/include/net/irda/iriap.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*********************************************************************
- *
- * Filename: iriap.h
- * Version: 0.5
- * Description: Information Access Protocol (IAP)
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Sat Dec 25 16:42:09 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRIAP_H
-#define IRIAP_H
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-
-#include <net/irda/iriap_event.h>
-#include <net/irda/irias_object.h>
-#include <net/irda/irqueue.h> /* irda_queue_t */
-#include <net/irda/timer.h> /* struct timer_list */
-
-#define IAP_LST 0x80
-#define IAP_ACK 0x40
-
-#define IAS_SERVER 0
-#define IAS_CLIENT 1
-
-/* IrIAP Op-codes */
-#define GET_INFO_BASE 0x01
-#define GET_OBJECTS 0x02
-#define GET_VALUE 0x03
-#define GET_VALUE_BY_CLASS 0x04
-#define GET_OBJECT_INFO 0x05
-#define GET_ATTRIB_NAMES 0x06
-
-#define IAS_SUCCESS 0
-#define IAS_CLASS_UNKNOWN 1
-#define IAS_ATTRIB_UNKNOWN 2
-#define IAS_DISCONNECT 10
-
-typedef void (*CONFIRM_CALLBACK)(int result, __u16 obj_id,
- struct ias_value *value, void *priv);
-
-struct iriap_cb {
- irda_queue_t q; /* Must be first */
- magic_t magic; /* Magic cookie */
-
- int mode; /* Client or server */
-
- __u32 saddr;
- __u32 daddr;
- __u8 operation;
-
- struct sk_buff *request_skb;
- struct lsap_cb *lsap;
- __u8 slsap_sel;
-
- /* Client states */
- IRIAP_STATE client_state;
- IRIAP_STATE call_state;
-
- /* Server states */
- IRIAP_STATE server_state;
- IRIAP_STATE r_connect_state;
-
- CONFIRM_CALLBACK confirm;
- void *priv; /* Used to identify client */
-
- __u8 max_header_size;
- __u32 max_data_size;
-
- struct timer_list watchdog_timer;
-};
-
-int iriap_init(void);
-void iriap_cleanup(void);
-
-struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
- CONFIRM_CALLBACK callback);
-void iriap_close(struct iriap_cb *self);
-
-int iriap_getvaluebyclass_request(struct iriap_cb *self,
- __u32 saddr, __u32 daddr,
- char *name, char *attr);
-void iriap_connect_request(struct iriap_cb *self);
-void iriap_send_ack( struct iriap_cb *self);
-void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb);
-
-void iriap_register_server(void);
-
-#endif
-
-
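
The IAS query API above is asynchronous: the CONFIRM_CALLBACK and an opaque priv cookie are registered at iriap_open() time, and the result (IAS_SUCCESS, IAS_CLASS_UNKNOWN, IAS_ATTRIB_UNKNOWN or IAS_DISCONNECT) arrives later through that callback. A compact sketch of the callback-plus-cookie shape, with the callback signature simplified and all helper names hypothetical:

```c
#include <stdio.h>

/* Result codes, as in the removed iriap.h */
#define IAS_SUCCESS		0
#define IAS_CLASS_UNKNOWN	1

/* Simplified stand-in for CONFIRM_CALLBACK */
typedef void (*confirm_cb)(int result, int value, void *priv);

struct query {
	confirm_cb confirm;
	void *priv;		/* opaque cookie handed back to the caller */
};

/* Pretend to run an asynchronous IAS lookup and complete it at once. */
static void getvalue_request(struct query *q, const char *klass)
{
	if (klass && klass[0])
		q->confirm(IAS_SUCCESS, 42, q->priv);
	else
		q->confirm(IAS_CLASS_UNKNOWN, 0, q->priv);
}

static void my_confirm(int result, int value, void *priv)
{
	const char *who = priv;

	if (result == IAS_SUCCESS)
		printf("%s: got value %d\n", who, value);
	else
		printf("%s: query failed (%d)\n", who, result);
}

int main(void)
{
	struct query q = { .confirm = my_confirm, .priv = (void *)"client-A" };

	getvalue_request(&q, "IrDA:IrCOMM");	/* client-A: got value 42 */
	getvalue_request(&q, "");		/* client-A: query failed (1) */
	return 0;
}
```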
diff --git a/include/net/irda/iriap_event.h b/include/net/irda/iriap_event.h
deleted file mode 100644
index 89747f06d9eb..000000000000
--- a/include/net/irda/iriap_event.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*********************************************************************
- *
- * Filename: iriap_event.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sun Oct 31 22:02:54 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRIAP_FSM_H
-#define IRIAP_FSM_H
-
-/* Forward because of circular include dependencies */
-struct iriap_cb;
-
-/* IrIAP states */
-typedef enum {
- /* Client */
- S_DISCONNECT,
- S_CONNECTING,
- S_CALL,
-
- /* S-Call */
- S_MAKE_CALL,
- S_CALLING,
- S_OUTSTANDING,
- S_REPLYING,
- S_WAIT_FOR_CALL,
- S_WAIT_ACTIVE,
-
- /* Server */
- R_DISCONNECT,
- R_CALL,
-
- /* R-Connect */
- R_WAITING,
- R_WAIT_ACTIVE,
- R_RECEIVING,
- R_EXECUTE,
- R_RETURNING,
-} IRIAP_STATE;
-
-typedef enum {
- IAP_CALL_REQUEST,
- IAP_CALL_REQUEST_GVBC,
- IAP_CALL_RESPONSE,
- IAP_RECV_F_LST,
- IAP_LM_DISCONNECT_INDICATION,
- IAP_LM_CONNECT_INDICATION,
- IAP_LM_CONNECT_CONFIRM,
-} IRIAP_EVENT;
-
-void iriap_next_client_state (struct iriap_cb *self, IRIAP_STATE state);
-void iriap_next_call_state (struct iriap_cb *self, IRIAP_STATE state);
-void iriap_next_server_state (struct iriap_cb *self, IRIAP_STATE state);
-void iriap_next_r_connect_state(struct iriap_cb *self, IRIAP_STATE state);
-
-
-void iriap_do_client_event(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
-void iriap_do_call_event (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
-
-void iriap_do_server_event (struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
-void iriap_do_r_connect_event(struct iriap_cb *self, IRIAP_EVENT event,
- struct sk_buff *skb);
-
-#endif /* IRIAP_FSM_H */
-
diff --git a/include/net/irda/irias_object.h b/include/net/irda/irias_object.h
deleted file mode 100644
index 83f78081799c..000000000000
--- a/include/net/irda/irias_object.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*********************************************************************
- *
- * Filename: irias_object.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Thu Oct 1 22:49:50 1998
- * Modified at: Wed Dec 15 11:20:57 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef LM_IAS_OBJECT_H
-#define LM_IAS_OBJECT_H
-
-#include <net/irda/irda.h>
-#include <net/irda/irqueue.h>
-
-/* LM-IAS Attribute types */
-#define IAS_MISSING 0
-#define IAS_INTEGER 1
-#define IAS_OCT_SEQ 2
-#define IAS_STRING 3
-
-/* Object ownership of attributes (user or kernel) */
-#define IAS_KERNEL_ATTR 0
-#define IAS_USER_ATTR 1
-
-/*
- * LM-IAS Object
- */
-struct ias_object {
- irda_queue_t q; /* Must be first! */
- magic_t magic;
-
- char *name;
- int id;
- hashbin_t *attribs;
-};
-
-/*
- * Values used by LM-IAS attributes
- */
-struct ias_value {
- __u8 type; /* Value description */
- __u8 owner; /* Managed from user/kernel space */
- int charset; /* Only used by string type */
- int len;
-
- /* Value */
- union {
- int integer;
- char *string;
- __u8 *oct_seq;
- } t;
-};
-
-/*
- * Attributes used by LM-IAS objects
- */
-struct ias_attrib {
- irda_queue_t q; /* Must be first! */
- int magic;
-
- char *name; /* Attribute name */
- struct ias_value *value; /* Attribute value */
-};
-
-struct ias_object *irias_new_object(char *name, int id);
-void irias_insert_object(struct ias_object *obj);
-int irias_delete_object(struct ias_object *obj);
-int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib,
- int cleanobject);
-void __irias_delete_object(struct ias_object *obj);
-
-void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
- int user);
-void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
- int user);
-void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
- int len, int user);
-int irias_object_change_attribute(char *obj_name, char *attrib_name,
- struct ias_value *new_value);
-struct ias_object *irias_find_object(char *name);
-struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name);
-
-struct ias_value *irias_new_string_value(char *string);
-struct ias_value *irias_new_integer_value(int integer);
-struct ias_value *irias_new_octseq_value(__u8 *octseq , int len);
-struct ias_value *irias_new_missing_value(void);
-void irias_delete_value(struct ias_value *value);
-
-extern struct ias_value irias_missing;
-extern hashbin_t *irias_objects;
-
-#endif
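
struct ias_value is a small tagged union: type says which member of t is live, and len/charset only apply to string and octet-sequence values. A freestanding sketch of building and printing such values (constants copied from the header; the helper and the sample values are illustrative):

```c
#include <stdio.h>
#include <string.h>

/* Attribute value types, as in the removed irias_object.h */
#define IAS_MISSING 0
#define IAS_INTEGER 1
#define IAS_STRING  3

struct ias_value {
	int type;		/* which union member is valid */
	int len;		/* only meaningful for strings/octet sequences */
	union {
		int integer;
		const char *string;
	} t;
};

static void print_value(const struct ias_value *v)
{
	switch (v->type) {
	case IAS_INTEGER:
		printf("integer: %d\n", v->t.integer);
		break;
	case IAS_STRING:
		printf("string (%d bytes): %s\n", v->len, v->t.string);
		break;
	default:
		printf("missing\n");
		break;
	}
}

int main(void)
{
	struct ias_value port = { .type = IAS_INTEGER, .t.integer = 7 };
	struct ias_value name = { .type = IAS_STRING,
				  .len  = (int)strlen("IrDA:IrCOMM"),
				  .t.string = "IrDA:IrCOMM" };

	print_value(&port);	/* integer: 7 */
	print_value(&name);	/* string (11 bytes): IrDA:IrCOMM */
	return 0;
}
```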
diff --git a/include/net/irda/irlan_client.h b/include/net/irda/irlan_client.h
deleted file mode 100644
index fa8455eda280..000000000000
--- a/include/net/irda/irlan_client.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_client.h
- * Version: 0.3
- * Description: IrDA LAN access layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Thu Apr 22 14:13:34 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_CLIENT_H
-#define IRLAN_CLIENT_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-#include <net/irda/irias_object.h>
-#include <net/irda/irlan_event.h>
-
-void irlan_client_discovery_indication(discinfo_t *, DISCOVERY_MODE, void *);
-void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr);
-
-void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb);
-void irlan_client_get_value_confirm(int result, __u16 obj_id,
- struct ias_value *value, void *priv);
-#endif
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
deleted file mode 100644
index 550c2d6ec7ff..000000000000
--- a/include/net/irda/irlan_common.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_common.h
- * Version: 0.8
- * Description: IrDA LAN access layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun Oct 31 19:41:24 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_H
-#define IRLAN_H
-
-#include <asm/param.h> /* for HZ */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/if_ether.h>
-
-#include <net/irda/irttp.h>
-
-#define IRLAN_MTU 1518
-#define IRLAN_TIMEOUT 10*HZ /* 10 seconds */
-
-/* Command packet types */
-#define CMD_GET_PROVIDER_INFO 0
-#define CMD_GET_MEDIA_CHAR 1
-#define CMD_OPEN_DATA_CHANNEL 2
-#define CMD_CLOSE_DATA_CHAN 3
-#define CMD_RECONNECT_DATA_CHAN 4
-#define CMD_FILTER_OPERATION 5
-
-/* Some responses */
-#define RSP_SUCCESS 0
-#define RSP_INSUFFICIENT_RESOURCES 1
-#define RSP_INVALID_COMMAND_FORMAT 2
-#define RSP_COMMAND_NOT_SUPPORTED 3
-#define RSP_PARAM_NOT_SUPPORTED 4
-#define RSP_VALUE_NOT_SUPPORTED 5
-#define RSP_NOT_OPEN 6
-#define RSP_AUTHENTICATION_REQUIRED 7
-#define RSP_INVALID_PASSWORD 8
-#define RSP_PROTOCOL_ERROR 9
-#define RSP_ASYNCHRONOUS_ERROR 255
-
-/* Media types */
-#define MEDIA_802_3 1
-#define MEDIA_802_5 2
-
-/* Filter parameters */
-#define DATA_CHAN 1
-#define FILTER_TYPE 2
-#define FILTER_MODE 3
-
-/* Filter types */
-#define IRLAN_DIRECTED 0x01
-#define IRLAN_FUNCTIONAL 0x02
-#define IRLAN_GROUP 0x04
-#define IRLAN_MAC_FRAME 0x08
-#define IRLAN_MULTICAST 0x10
-#define IRLAN_BROADCAST 0x20
-#define IRLAN_IPX_SOCKET 0x40
-
-/* Filter modes */
-#define ALL 1
-#define FILTER 2
-#define NONE 3
-
-/* Filter operations */
-#define GET 1
-#define CLEAR 2
-#define ADD 3
-#define REMOVE 4
-#define DYNAMIC 5
-
-/* Access types */
-#define ACCESS_DIRECT 1
-#define ACCESS_PEER 2
-#define ACCESS_HOSTED 3
-
-#define IRLAN_BYTE 0
-#define IRLAN_SHORT 1
-#define IRLAN_ARRAY 2
-
-/* IrLAN sits on top of IrTTP */
-#define IRLAN_MAX_HEADER (TTP_HEADER+LMP_HEADER)
-/* 1 byte for the command code and 1 byte for the parameter count */
-#define IRLAN_CMD_HEADER 2
-
-#define IRLAN_STRING_PARAMETER_LEN(name, value) (1 + strlen((name)) + 2 \
- + strlen ((value)))
-#define IRLAN_BYTE_PARAMETER_LEN(name) (1 + strlen((name)) + 2 + 1)
-#define IRLAN_SHORT_PARAMETER_LEN(name) (1 + strlen((name)) + 2 + 2)
-
-/*
- * IrLAN client
- */
-struct irlan_client_cb {
- int state;
-
- int open_retries;
-
- struct tsap_cb *tsap_ctrl;
- __u32 max_sdu_size;
- __u8 max_header_size;
-
- int access_type; /* Access type of provider */
- __u8 reconnect_key[255];
- __u8 key_len;
-
- __u16 recv_arb_val;
- __u16 max_frame;
- int filter_type;
-
- int unicast_open;
- int broadcast_open;
-
- int tx_busy;
- struct sk_buff_head txq; /* Transmit control queue */
-
- struct iriap_cb *iriap;
-
- struct timer_list kick_timer;
-};
-
-/*
- * IrLAN provider
- */
-struct irlan_provider_cb {
- int state;
-
- struct tsap_cb *tsap_ctrl;
- __u32 max_sdu_size;
- __u8 max_header_size;
-
- /*
- * Store some values here which are used by the provider to parse
- * the filter operations
- */
- int data_chan;
- int filter_type;
- int filter_mode;
- int filter_operation;
- int filter_entry;
- int access_type; /* Access type */
- __u16 send_arb_val;
-
- __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
-};
-
-/*
- * IrLAN control block
- */
-struct irlan_cb {
- int magic;
- struct list_head dev_list;
- struct net_device *dev; /* Ethernet device structure*/
-
- __u32 saddr; /* Source device address */
- __u32 daddr; /* Destination device address */
- int disconnect_reason; /* Why we got disconnected */
-
- int media; /* Media type */
- __u8 version[2]; /* IrLAN version */
-
- struct tsap_cb *tsap_data; /* Data TSAP */
-
- int use_udata; /* Use Unit Data transfers */
-
- __u8 stsap_sel_data; /* Source data TSAP selector */
- __u8 dtsap_sel_data; /* Destination data TSAP selector */
- __u8 dtsap_sel_ctrl; /* Destination ctrl TSAP selector */
-
- struct irlan_client_cb client; /* Client specific fields */
- struct irlan_provider_cb provider; /* Provider specific fields */
-
- __u32 max_sdu_size;
- __u8 max_header_size;
-
- wait_queue_head_t open_wait;
- struct timer_list watchdog_timer;
-};
-
-void irlan_close(struct irlan_cb *self);
-void irlan_close_tsaps(struct irlan_cb *self);
-
-int irlan_register_netdev(struct irlan_cb *self);
-void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel);
-void irlan_start_watchdog_timer(struct irlan_cb *self, int timeout);
-
-void irlan_open_data_tsap(struct irlan_cb *self);
-
-int irlan_run_ctrl_tx_queue(struct irlan_cb *self);
-
-struct irlan_cb *irlan_get_any(void);
-void irlan_get_provider_info(struct irlan_cb *self);
-void irlan_get_media_char(struct irlan_cb *self);
-void irlan_open_data_channel(struct irlan_cb *self);
-void irlan_close_data_channel(struct irlan_cb *self);
-void irlan_set_multicast_filter(struct irlan_cb *self, int status);
-void irlan_set_broadcast_filter(struct irlan_cb *self, int status);
-
-int irlan_insert_byte_param(struct sk_buff *skb, char *param, __u8 value);
-int irlan_insert_short_param(struct sk_buff *skb, char *param, __u16 value);
-int irlan_insert_string_param(struct sk_buff *skb, char *param, char *value);
-int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *value,
- __u16 value_len);
-
-int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len);
-
-#endif
-
-
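
The IRLAN_*_PARAMETER_LEN macros imply a simple TLV-like layout for command parameters: one byte of name length, the name, two bytes of value length, then the value. A sketch of an encoder following that layout; the layout itself is inferred from the macros, and the little-endian value length is an assumption of this sketch, not taken from the header:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Encode one string parameter using the layout implied by
 * IRLAN_STRING_PARAMETER_LEN(): 1 byte name length, the name,
 * 2 bytes value length, the value.  (Little-endian value length
 * is an assumption made for this sketch.)
 */
static int insert_string_param(uint8_t *buf, size_t size,
			       const char *name, const char *value)
{
	size_t nlen = strlen(name);
	size_t vlen = strlen(value);
	size_t total = 1 + nlen + 2 + vlen;
	size_t off = 0;

	if (nlen > 255 || vlen > 65535 || total > size)
		return -1;

	buf[off++] = (uint8_t)nlen;
	memcpy(buf + off, name, nlen);
	off += nlen;
	buf[off++] = (uint8_t)(vlen & 0xff);
	buf[off++] = (uint8_t)(vlen >> 8);
	memcpy(buf + off, value, vlen);
	off += vlen;

	return (int)off;	/* matches IRLAN_STRING_PARAMETER_LEN(name, value) */
}

int main(void)
{
	uint8_t frame[64];
	int len = insert_string_param(frame, sizeof(frame), "MEDIA", "802.3");

	printf("encoded %d bytes (macro predicts %zu)\n",
	       len, 1 + strlen("MEDIA") + 2 + strlen("802.3"));
	return 0;
}
```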
diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h
deleted file mode 100644
index de5c81691f33..000000000000
--- a/include/net/irda/irlan_eth.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_eth.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Thu Oct 15 08:36:58 1998
- * Modified at: Fri May 14 23:29:00 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_ETH_H
-#define IRLAN_ETH_H
-
-struct net_device *alloc_irlandev(const char *name);
-int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb);
-
-void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow);
-#endif
diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h
deleted file mode 100644
index 018b5a77e610..000000000000
--- a/include/net/irda/irlan_event.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_event.h
- * Version:
- * Description: LAN access
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Tue Feb 2 09:45:17 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_EVENT_H
-#define IRLAN_EVENT_H
-
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-
-#include <net/irda/irlan_common.h>
-
-typedef enum {
- IRLAN_IDLE,
- IRLAN_QUERY,
- IRLAN_CONN,
- IRLAN_INFO,
- IRLAN_MEDIA,
- IRLAN_OPEN,
- IRLAN_WAIT,
- IRLAN_ARB,
- IRLAN_DATA,
- IRLAN_CLOSE,
- IRLAN_SYNC
-} IRLAN_STATE;
-
-typedef enum {
- IRLAN_DISCOVERY_INDICATION,
- IRLAN_IAS_PROVIDER_AVAIL,
- IRLAN_IAS_PROVIDER_NOT_AVAIL,
- IRLAN_LAP_DISCONNECT,
- IRLAN_LMP_DISCONNECT,
- IRLAN_CONNECT_COMPLETE,
- IRLAN_DATA_INDICATION,
- IRLAN_DATA_CONNECT_INDICATION,
- IRLAN_RETRY_CONNECT,
-
- IRLAN_CONNECT_INDICATION,
- IRLAN_GET_INFO_CMD,
- IRLAN_GET_MEDIA_CMD,
- IRLAN_OPEN_DATA_CMD,
- IRLAN_FILTER_CONFIG_CMD,
-
- IRLAN_CHECK_CON_ARB,
- IRLAN_PROVIDER_SIGNAL,
-
- IRLAN_WATCHDOG_TIMEOUT,
-} IRLAN_EVENT;
-
-extern const char * const irlan_state[];
-
-void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
- struct sk_buff *skb);
-
-void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
- struct sk_buff *skb);
-
-void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state);
-void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state);
-
-#endif
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
deleted file mode 100644
index a5a2539485bd..000000000000
--- a/include/net/irda/irlan_filter.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_filter.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Fri Jan 29 15:24:08 1999
- * Modified at: Sun Feb 7 23:35:31 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_FILTER_H
-#define IRLAN_FILTER_H
-
-void irlan_check_command_param(struct irlan_cb *self, char *param,
- char *value);
-void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
-#ifdef CONFIG_PROC_FS
-void irlan_print_filter(struct seq_file *seq, int filter_type);
-#endif
-
-#endif /* IRLAN_FILTER_H */
diff --git a/include/net/irda/irlan_provider.h b/include/net/irda/irlan_provider.h
deleted file mode 100644
index 92f3b0e1029b..000000000000
--- a/include/net/irda/irlan_provider.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_provider.h
- * Version: 0.1
- * Description: IrDA LAN access layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Sun May 9 12:26:11 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAN_SERVER_H
-#define IRLAN_SERVER_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-#include <net/irda/irlan_common.h>
-
-void irlan_provider_ctrl_disconnect_indication(void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *skb);
-
-
-void irlan_provider_connect_response(struct irlan_cb *, struct tsap_cb *);
-
-int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb);
-int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
- struct sk_buff *skb);
-
-void irlan_provider_send_reply(struct irlan_cb *self, int command,
- int ret_code);
-int irlan_provider_open_ctrl_tsap(struct irlan_cb *self);
-
-#endif
-
-
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
deleted file mode 100644
index 6f23e820618c..000000000000
--- a/include/net/irda/irlap.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlap.h
- * Version: 0.8
- * Description: An IrDA LAP driver for Linux
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Fri Dec 10 13:21:17 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLAP_H
-#define IRLAP_H
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/timer.h>
-
-#include <net/irda/irqueue.h> /* irda_queue_t */
-#include <net/irda/qos.h> /* struct qos_info */
-#include <net/irda/discovery.h> /* discovery_t */
-#include <net/irda/irlap_event.h> /* IRLAP_STATE, ... */
-#include <net/irda/irmod.h> /* struct notify_t */
-
-#define CONFIG_IRDA_DYNAMIC_WINDOW 1
-
-#define LAP_RELIABLE 1
-#define LAP_UNRELIABLE 0
-
-#define LAP_ADDR_HEADER 1 /* IrLAP Address Header */
-#define LAP_CTRL_HEADER 1 /* IrLAP Control Header */
-
-/* May be different when we get VFIR */
-#define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)
-
-/* Each IrDA device gets a random 32 bits IRLAP device address */
-#define LAP_ALEN 4
-
-#define BROADCAST 0xffffffff /* Broadcast device address */
-#define CBROADCAST 0xfe /* Connection broadcast address */
-#define XID_FORMAT 0x01 /* Discovery XID format */
-
-/* Nobody seems to use this constant. */
-#define LAP_WINDOW_SIZE 8
-/* We keep the LAP queue very small to minimise the amount of buffering.
- * This improves latency and reduces resource consumption.
- * This works only because we have synchronous refilling of IrLAP through
- * the flow control mechanism (via scheduler and IrTTP).
- * 2 buffers is the minimum we can work with, one that we send while polling
- * IrTTP, and another to know that we should not send the pf bit.
- * Jean II */
-#define LAP_HIGH_THRESHOLD 2
-/* Some rare non-TTP clients don't implement flow control, and
- * so don't comply with the above limit (and neither with this one).
- * For IAP and management, it doesn't matter, because they never transmit much.
- * For IrLPT, this should be fixed.
- * - Jean II */
-#define LAP_MAX_QUEUE 10
-/* Please note that all IrDA management frames (LMP/TTP conn req/disc and
- * IAS queries) fall in the second category and are sent to LAP even if TTP
- * is stopped. This means that those frames will wait only a maximum of
- * two (2) data frames before being sent on the "wire", which speeds up
- * new socket setup when the link is saturated.
- * Same story for two sockets competing for the medium: if one saturates
- * the LAP, when the other wants to transmit it only has to wait for
- * a maximum of three (3) packets (2 + one scheduling), which improves performance
- * of delay-sensitive applications.
- * Jean II */
-
-#define NR_EXPECTED 1
-#define NR_UNEXPECTED 0
-#define NR_INVALID -1
-
-#define NS_EXPECTED 1
-#define NS_UNEXPECTED 0
-#define NS_INVALID -1
-
-/*
- * Meta information passed within the IrLAP state machine
- */
-struct irlap_info {
- __u8 caddr; /* Connection address */
- __u8 control; /* Frame type */
- __u8 cmd;
-
- __u32 saddr;
- __u32 daddr;
-
- int pf; /* Poll/final bit set */
-
- __u8 nr; /* Sequence number of next frame expected */
- __u8 ns; /* Sequence number of frame sent */
-
- int S; /* Number of slots */
- int slot; /* Random chosen slot */
- int s; /* Current slot */
-
- discovery_t *discovery; /* Discovery information */
-};
-
-/* Main structure of IrLAP */
-struct irlap_cb {
- irda_queue_t q; /* Must be first */
- magic_t magic;
-
- /* Device we are attached to */
- struct net_device *netdev;
- char hw_name[2*IFNAMSIZ + 1];
-
- /* Connection state */
- volatile IRLAP_STATE state; /* Current state */
-
- /* Timers used by IrLAP */
- struct timer_list query_timer;
- struct timer_list slot_timer;
- struct timer_list discovery_timer;
- struct timer_list final_timer;
- struct timer_list poll_timer;
- struct timer_list wd_timer;
- struct timer_list backoff_timer;
-
- /* Media busy stuff */
- struct timer_list media_busy_timer;
- int media_busy;
-
- /* Timeouts which will be different with different turn time */
- int slot_timeout;
- int poll_timeout;
- int final_timeout;
- int wd_timeout;
-
- struct sk_buff_head txq; /* Frames to be transmitted */
- struct sk_buff_head txq_ultra;
-
- __u8 caddr; /* Connection address */
- __u32 saddr; /* Source device address */
- __u32 daddr; /* Destination device address */
-
- int retry_count; /* Times tried to establish connection */
- int add_wait; /* True if we are waiting for frame */
-
- __u8 connect_pending;
- __u8 disconnect_pending;
-
- /* To send a faster RR if tx queue empty */
-#ifdef CONFIG_IRDA_FAST_RR
- int fast_RR_timeout;
- int fast_RR;
-#endif /* CONFIG_IRDA_FAST_RR */
-
- int N1; /* N1 * F-timer = Negotiated link disconnect warning threshold */
- int N2; /* N2 * F-timer = Negotiated link disconnect time */
- int N3; /* Connection retry count */
-
- int local_busy;
- int remote_busy;
- int xmitflag;
-
- __u8 vs; /* Next frame to be sent */
- __u8 vr; /* Next frame to be received */
- __u8 va; /* Last frame acked */
- int window; /* Nr of I-frames allowed to send */
- int window_size; /* Current negotiated window size */
-
-#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
- __u32 line_capacity; /* Number of bytes allowed to send */
- __u32 bytes_left; /* Number of bytes still allowed to transmit */
-#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
-
- struct sk_buff_head wx_list;
-
- __u8 ack_required;
-
- /* XID parameters */
- __u8 S; /* Number of slots */
- __u8 slot; /* Random chosen slot */
- __u8 s; /* Current slot */
- int frame_sent; /* Have we sent reply? */
-
- hashbin_t *discovery_log;
- discovery_t *discovery_cmd;
-
- __u32 speed; /* Link speed */
-
- struct qos_info qos_tx; /* QoS requested by peer */
- struct qos_info qos_rx; /* QoS requested by self */
- struct qos_info *qos_dev; /* QoS supported by device */
-
- notify_t notify; /* Callbacks to IrLMP */
-
- int mtt_required; /* Minimum turnaround time required */
- int xbofs_delay; /* Nr of XBOFs used to achieve MTT */
- int bofs_count; /* Negotiated extra BOFs */
- int next_bofs; /* Negotiated extra BOFs after next frame */
-
- int mode; /* IrLAP mode (primary, secondary or monitor) */
-};
-
-/*
- * Function prototypes
- */
-int irlap_init(void);
-void irlap_cleanup(void);
-
-struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
- const char *hw_name);
-void irlap_close(struct irlap_cb *self);
-
-void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
- struct qos_info *qos, int sniff);
-void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb);
-void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb);
-void irlap_connect_confirm(struct irlap_cb *, struct sk_buff *skb);
-
-void irlap_data_indication(struct irlap_cb *, struct sk_buff *, int unreliable);
-void irlap_data_request(struct irlap_cb *, struct sk_buff *, int unreliable);
-
-#ifdef CONFIG_IRDA_ULTRA
-void irlap_unitdata_request(struct irlap_cb *, struct sk_buff *);
-void irlap_unitdata_indication(struct irlap_cb *, struct sk_buff *);
-#endif /* CONFIG_IRDA_ULTRA */
-
-void irlap_disconnect_request(struct irlap_cb *);
-void irlap_disconnect_indication(struct irlap_cb *, LAP_REASON reason);
-
-void irlap_status_indication(struct irlap_cb *, int quality_of_link);
-
-void irlap_test_request(__u8 *info, int len);
-
-void irlap_discovery_request(struct irlap_cb *, discovery_t *discovery);
-void irlap_discovery_confirm(struct irlap_cb *, hashbin_t *discovery_log);
-void irlap_discovery_indication(struct irlap_cb *, discovery_t *discovery);
-
-void irlap_reset_indication(struct irlap_cb *self);
-void irlap_reset_confirm(void);
-
-void irlap_update_nr_received(struct irlap_cb *, int nr);
-int irlap_validate_nr_received(struct irlap_cb *, int nr);
-int irlap_validate_ns_received(struct irlap_cb *, int ns);
-
-int irlap_generate_rand_time_slot(int S, int s);
-void irlap_initiate_connection_state(struct irlap_cb *);
-void irlap_flush_all_queues(struct irlap_cb *);
-void irlap_wait_min_turn_around(struct irlap_cb *, struct qos_info *);
-
-void irlap_apply_default_connection_parameters(struct irlap_cb *self);
-void irlap_apply_connection_parameters(struct irlap_cb *self, int now);
-
-#define IRLAP_GET_HEADER_SIZE(self) (LAP_MAX_HEADER)
-#define IRLAP_GET_TX_QUEUE_LEN(self) skb_queue_len(&self->txq)
-
-/* Return TRUE if the node is in primary mode (i.e. master)
- * - Jean II */
-static inline int irlap_is_primary(struct irlap_cb *self)
-{
- int ret;
- switch(self->state) {
- case LAP_XMIT_P:
- case LAP_NRM_P:
- ret = 1;
- break;
- case LAP_XMIT_S:
- case LAP_NRM_S:
- ret = 0;
- break;
- default:
- ret = -1;
- }
- return ret;
-}
-
-/* Clear a pending IrLAP disconnect. - Jean II */
-static inline void irlap_clear_disconnect(struct irlap_cb *self)
-{
- self->disconnect_pending = FALSE;
-}
-
-/*
- * Function irlap_next_state (self, state)
- *
- * Switches state and provides debug information
- *
- */
-static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
-{
- /*
- if (!self || self->magic != LAP_MAGIC)
- return;
-
- pr_debug("next LAP state = %s\n", irlap_state[state]);
- */
- self->state = state;
-}
-
-#endif
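
A minimal caller sketch for irlap_is_primary() above, which returns 1 for primary, 0 for secondary and -1 when the link is not in a connected state. This is illustrative only and not part of the removed header; check_role() and its pr_info() strings are assumed names.

/* Illustrative sketch: consuming the tri-state result of irlap_is_primary(). */
static void check_role(struct irlap_cb *self)
{
	switch (irlap_is_primary(self)) {
	case 1:
		pr_info("IrLAP link: primary (master)\n");
		break;
	case 0:
		pr_info("IrLAP link: secondary (slave)\n");
		break;
	default:	/* -1: not in a connected state */
		pr_info("IrLAP link: not connected\n");
		break;
	}
}
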
diff --git a/include/net/irda/irlap_event.h b/include/net/irda/irlap_event.h
deleted file mode 100644
index e4325fee1267..000000000000
--- a/include/net/irda/irlap_event.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*********************************************************************
- *
- *
- * Filename: irlap_event.h
- * Version: 0.1
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Tue Dec 21 11:20:30 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRLAP_EVENT_H
-#define IRLAP_EVENT_H
-
-#include <net/irda/irda.h>
-
-/* A few forward declarations (to make compiler happy) */
-struct irlap_cb;
-struct irlap_info;
-
-/* IrLAP States */
-typedef enum {
- LAP_NDM, /* Normal disconnected mode */
- LAP_QUERY,
- LAP_REPLY,
- LAP_CONN, /* Connect indication */
- LAP_SETUP, /* Setting up connection */
- LAP_OFFLINE, /* A really boring state */
- LAP_XMIT_P,
- LAP_PCLOSE,
- LAP_NRM_P, /* Normal response mode as primary */
- LAP_RESET_WAIT,
- LAP_RESET,
- LAP_NRM_S, /* Normal response mode as secondary */
- LAP_XMIT_S,
- LAP_SCLOSE,
- LAP_RESET_CHECK,
-} IRLAP_STATE;
-
-/* IrLAP Events */
-typedef enum {
- /* Services events */
- DISCOVERY_REQUEST,
- CONNECT_REQUEST,
- CONNECT_RESPONSE,
- DISCONNECT_REQUEST,
- DATA_REQUEST,
- RESET_REQUEST,
- RESET_RESPONSE,
-
- /* Send events */
- SEND_I_CMD,
- SEND_UI_FRAME,
-
- /* Receive events */
- RECV_DISCOVERY_XID_CMD,
- RECV_DISCOVERY_XID_RSP,
- RECV_SNRM_CMD,
- RECV_TEST_CMD,
- RECV_TEST_RSP,
- RECV_UA_RSP,
- RECV_DM_RSP,
- RECV_RD_RSP,
- RECV_I_CMD,
- RECV_I_RSP,
- RECV_UI_FRAME,
- RECV_FRMR_RSP,
- RECV_RR_CMD,
- RECV_RR_RSP,
- RECV_RNR_CMD,
- RECV_RNR_RSP,
- RECV_REJ_CMD,
- RECV_REJ_RSP,
- RECV_SREJ_CMD,
- RECV_SREJ_RSP,
- RECV_DISC_CMD,
-
- /* Timer events */
- SLOT_TIMER_EXPIRED,
- QUERY_TIMER_EXPIRED,
- FINAL_TIMER_EXPIRED,
- POLL_TIMER_EXPIRED,
- DISCOVERY_TIMER_EXPIRED,
- WD_TIMER_EXPIRED,
- BACKOFF_TIMER_EXPIRED,
- MEDIA_BUSY_TIMER_EXPIRED,
-} IRLAP_EVENT;
-
-/*
- * Disconnect reason code
- */
-typedef enum { /* FIXME check the two first reason codes */
- LAP_DISC_INDICATION=1, /* Received a disconnect request from peer */
- LAP_NO_RESPONSE, /* Too many retransmits without response */
- LAP_RESET_INDICATION, /* Too many retransmits, or invalid nr/ns */
- LAP_FOUND_NONE, /* No devices were discovered */
- LAP_MEDIA_BUSY,
- LAP_PRIMARY_CONFLICT,
-} LAP_REASON;
-
-extern const char *const irlap_state[];
-
-void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-void irlap_print_event(IRLAP_EVENT event);
-
-int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
-
-#endif
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
deleted file mode 100644
index cbc12a926e5f..000000000000
--- a/include/net/irda/irlap_frame.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlap_frame.h
- * Version: 0.9
- * Description: IrLAP frame declarations
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Aug 19 10:27:26 1997
- * Modified at: Sat Dec 25 21:07:26 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRLAP_FRAME_H
-#define IRLAP_FRAME_H
-
-#include <linux/skbuff.h>
-
-#include <net/irda/irda.h>
-
-/* A few forward declarations (to make compiler happy) */
-struct irlap_cb;
-struct discovery_t;
-
-/* Frame types and templates */
-#define INVALID 0xff
-
-/* Unnumbered (U) commands */
-#define SNRM_CMD 0x83 /* Set Normal Response Mode */
-#define DISC_CMD 0x43 /* Disconnect */
-#define XID_CMD 0x2f /* Exchange Station Identification */
-#define TEST_CMD 0xe3 /* Test */
-
-/* Unnumbered responses */
-#define RNRM_RSP 0x83 /* Request Normal Response Mode */
-#define UA_RSP 0x63 /* Unnumbered Acknowledgement */
-#define FRMR_RSP 0x87 /* Frame Reject */
-#define DM_RSP 0x0f /* Disconnect Mode */
-#define RD_RSP 0x43 /* Request Disconnection */
-#define XID_RSP 0xaf /* Exchange Station Identification */
-#define TEST_RSP 0xe3 /* Test frame */
-
-/* Supervisory (S) */
-#define RR 0x01 /* Receive Ready */
-#define REJ 0x09 /* Reject */
-#define RNR 0x05 /* Receive Not Ready */
-#define SREJ 0x0d /* Selective Reject */
-
-/* Information (I) */
-#define I_FRAME 0x00 /* Information Format */
-#define UI_FRAME 0x03 /* Unnumbered Information */
-
-#define CMD_FRAME 0x01
-#define RSP_FRAME 0x00
-
-#define PF_BIT 0x10 /* Poll/final bit */
-
-/* Some IrLAP field lengths */
-/*
- * Only the baud rate triplet is 4 bytes (its PV can be 2 bytes).
- * All other params (7) are 3 bytes each, so that's 7*3 + 1*4 bytes.
- */
-#define IRLAP_NEGOCIATION_PARAMS_LEN 25
-#define IRLAP_DISCOVERY_INFO_LEN 32
-
-struct disc_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
-} __packed;
-
-struct xid_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
- __u8 ident; /* Should always be XID_FORMAT */
- __le32 saddr; /* Source device address */
- __le32 daddr; /* Destination device address */
- __u8 flags; /* Discovery flags */
- __u8 slotnr;
- __u8 version;
-} __packed;
-
-struct test_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
- __le32 saddr; /* Source device address */
- __le32 daddr; /* Destination device address */
-} __packed;
-
-struct ua_frame {
- __u8 caddr;
- __u8 control;
- __le32 saddr; /* Source device address */
- __le32 daddr; /* Dest device address */
-} __packed;
-
-struct dm_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
-} __packed;
-
-struct rd_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
-} __packed;
-
-struct rr_frame {
- __u8 caddr; /* Connection address */
- __u8 control;
-} __packed;
-
-struct i_frame {
- __u8 caddr;
- __u8 control;
-} __packed;
-
-struct snrm_frame {
- __u8 caddr;
- __u8 control;
- __le32 saddr;
- __le32 daddr;
- __u8 ncaddr;
-} __packed;
-
-void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
-void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s,
- __u8 command,
- struct discovery_t *discovery);
-void irlap_send_snrm_frame(struct irlap_cb *, struct qos_info *);
-void irlap_send_test_frame(struct irlap_cb *self, __u8 caddr, __u32 daddr,
- struct sk_buff *cmd);
-void irlap_send_ua_response_frame(struct irlap_cb *, struct qos_info *);
-void irlap_send_dm_frame(struct irlap_cb *self);
-void irlap_send_rd_frame(struct irlap_cb *self);
-void irlap_send_disc_frame(struct irlap_cb *self);
-void irlap_send_rr_frame(struct irlap_cb *self, int command);
-
-void irlap_send_data_primary(struct irlap_cb *, struct sk_buff *);
-void irlap_send_data_primary_poll(struct irlap_cb *, struct sk_buff *);
-void irlap_send_data_secondary(struct irlap_cb *, struct sk_buff *);
-void irlap_send_data_secondary_final(struct irlap_cb *, struct sk_buff *);
-void irlap_resend_rejected_frames(struct irlap_cb *, int command);
-void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
-
-void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
- __u8 caddr, int command);
-
-int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
- struct sk_buff *skb);
-
-#endif
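
The 25-byte IRLAP_NEGOCIATION_PARAMS_LEN above follows from the comment's arithmetic: seven 3-byte parameter triplets plus one 4-byte baud-rate triplet. A compile-time check of that derivation, assuming a C11 _Static_assert is available (illustrative only, not part of the removed header):

/* Illustrative only: 7 params * 3 bytes + 1 baud-rate triplet * 4 bytes = 25. */
_Static_assert(7 * 3 + 1 * 4 == IRLAP_NEGOCIATION_PARAMS_LEN,
	       "negotiation parameter block is expected to be 25 bytes");
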
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
deleted file mode 100644
index f132924cc9da..000000000000
--- a/include/net/irda/irlmp.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlmp.h
- * Version: 0.9
- * Description: IrDA Link Management Protocol (LMP) layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 17 20:54:32 1997
- * Modified at: Fri Dec 10 13:23:01 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLMP_H
-#define IRLMP_H
-
-#include <asm/param.h> /* for HZ */
-
-#include <linux/types.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/qos.h>
-#include <net/irda/irlap.h> /* LAP_MAX_HEADER, ... */
-#include <net/irda/irlmp_event.h>
-#include <net/irda/irqueue.h>
-#include <net/irda/discovery.h>
-
-/* LSAP-SEL's */
-#define LSAP_MASK 0x7f
-#define LSAP_IAS 0x00
-#define LSAP_ANY 0xff
-#define LSAP_MAX 0x6f /* 0x70-0x7f are reserved */
-#define LSAP_CONNLESS 0x70 /* Connectionless LSAP, mostly used for Ultra */
-
-#define DEV_ADDR_ANY 0xffffffff
-
-#define LMP_HEADER 2 /* Dest LSAP + Source LSAP */
-#define LMP_CONTROL_HEADER 4 /* LMP_HEADER + opcode + parameter */
-#define LMP_PID_HEADER 1 /* Used by Ultra */
-#define LMP_MAX_HEADER (LMP_CONTROL_HEADER+LAP_MAX_HEADER)
-
-#define LM_MAX_CONNECTIONS 10
-
-#define LM_IDLE_TIMEOUT 2*HZ /* 2 seconds for now */
-
-typedef enum {
- S_PNP = 0,
- S_PDA,
- S_COMPUTER,
- S_PRINTER,
- S_MODEM,
- S_FAX,
- S_LAN,
- S_TELEPHONY,
- S_COMM,
- S_OBEX,
- S_ANY,
- S_END,
-} SERVICE;
-
-/* For selective discovery */
-typedef void (*DISCOVERY_CALLBACK1) (discinfo_t *, DISCOVERY_MODE, void *);
-/* For expiry (the same) */
-typedef void (*DISCOVERY_CALLBACK2) (discinfo_t *, DISCOVERY_MODE, void *);
-
-typedef struct {
- irda_queue_t queue; /* Must be first */
-
- __u16_host_order hints; /* Hint bits */
-} irlmp_service_t;
-
-typedef struct {
- irda_queue_t queue; /* Must be first */
-
- __u16_host_order hint_mask;
-
- DISCOVERY_CALLBACK1 disco_callback; /* Selective discovery */
- DISCOVERY_CALLBACK2 expir_callback; /* Selective expiration */
- void *priv; /* Used to identify client */
-} irlmp_client_t;
-
-/*
- * Information about each logical LSAP connection
- */
-struct lsap_cb {
- irda_queue_t queue; /* Must be first */
- magic_t magic;
-
- unsigned long connected; /* set_bit used on this */
- int persistent;
-
- __u8 slsap_sel; /* Source (this) LSAP address */
- __u8 dlsap_sel; /* Destination LSAP address (if connected) */
-#ifdef CONFIG_IRDA_ULTRA
- __u8 pid; /* Used by connectionless LSAP */
-#endif /* CONFIG_IRDA_ULTRA */
- struct sk_buff *conn_skb; /* Store skb here while connecting */
-
- struct timer_list watchdog_timer;
-
- LSAP_STATE lsap_state; /* Connection state */
- notify_t notify; /* Indication/Confirm entry points */
- struct qos_info qos; /* QoS for this connection */
-
- struct lap_cb *lap; /* Pointer to LAP connection structure */
-};
-
-/*
- * Used for caching the last slsap->dlsap->handle mapping
- *
- * We don't need to keep/match the remote address in the cache because
- * we are associated with a specific LAP (which implies it).
- * Jean II
- */
-typedef struct {
- int valid;
-
- __u8 slsap_sel;
- __u8 dlsap_sel;
- struct lsap_cb *lsap;
-} CACHE_ENTRY;
-
-/*
- * Information about each registered IrLAP layer
- */
-struct lap_cb {
- irda_queue_t queue; /* Must be first */
- magic_t magic;
-
- int reason; /* LAP disconnect reason */
-
- IRLMP_STATE lap_state;
-
- struct irlap_cb *irlap; /* Instance of IrLAP layer */
- hashbin_t *lsaps; /* LSAP associated with this link */
- struct lsap_cb *flow_next; /* Next lsap to be polled for Tx */
-
- __u8 caddr; /* Connection address */
- __u32 saddr; /* Source device address */
- __u32 daddr; /* Destination device address */
-
- struct qos_info *qos; /* LAP QoS for this session */
- struct timer_list idle_timer;
-
-#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
- /* The lsap cache was moved from struct irlmp_cb to here because
- * it must be associated with the specific LAP. Also, this
- * improves performance. - Jean II */
- CACHE_ENTRY cache; /* Caching last slsap->dlsap->handle mapping */
-#endif
-};
-
-/*
- * Main structure for IrLMP
- */
-struct irlmp_cb {
- magic_t magic;
-
- __u8 conflict_flag;
-
- discovery_t discovery_cmd; /* Discovery command to use by IrLAP */
- discovery_t discovery_rsp; /* Discovery response to use by IrLAP */
-
- /* Last lsap picked automatically by irlmp_find_free_slsap() */
- int last_lsap_sel;
-
- struct timer_list discovery_timer;
-
- hashbin_t *links; /* IrLAP connection table */
- hashbin_t *unconnected_lsaps;
- hashbin_t *clients;
- hashbin_t *services;
-
- hashbin_t *cachelog; /* Current discovery log */
-
- int running;
-
- __u16_host_order hints; /* Hint bits */
-};
-
-/* Prototype declarations */
-int irlmp_init(void);
-void irlmp_cleanup(void);
-struct lsap_cb *irlmp_open_lsap(__u8 slsap, notify_t *notify, __u8 pid);
-void irlmp_close_lsap( struct lsap_cb *self);
-
-__u16 irlmp_service_to_hint(int service);
-void *irlmp_register_service(__u16 hints);
-int irlmp_unregister_service(void *handle);
-void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
- DISCOVERY_CALLBACK2 expir_clb, void *priv);
-int irlmp_unregister_client(void *handle);
-int irlmp_update_client(void *handle, __u16 hint_mask,
- DISCOVERY_CALLBACK1 disco_clb,
- DISCOVERY_CALLBACK2 expir_clb, void *priv);
-
-void irlmp_register_link(struct irlap_cb *, __u32 saddr, notify_t *);
-void irlmp_unregister_link(__u32 saddr);
-
-int irlmp_connect_request(struct lsap_cb *, __u8 dlsap_sel,
- __u32 saddr, __u32 daddr,
- struct qos_info *, struct sk_buff *);
-void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb);
-int irlmp_connect_response(struct lsap_cb *, struct sk_buff *);
-void irlmp_connect_confirm(struct lsap_cb *, struct sk_buff *);
-struct lsap_cb *irlmp_dup(struct lsap_cb *self, void *instance);
-
-void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
- struct sk_buff *userdata);
-int irlmp_disconnect_request(struct lsap_cb *, struct sk_buff *userdata);
-
-void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE mode);
-void irlmp_discovery_request(int nslots);
-discinfo_t *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
-void irlmp_do_expiry(void);
-void irlmp_do_discovery(int nslots);
-discovery_t *irlmp_get_discovery_response(void);
-void irlmp_discovery_expiry(discinfo_t *expiry, int number);
-
-int irlmp_data_request(struct lsap_cb *, struct sk_buff *);
-void irlmp_data_indication(struct lsap_cb *, struct sk_buff *);
-
-int irlmp_udata_request(struct lsap_cb *, struct sk_buff *);
-void irlmp_udata_indication(struct lsap_cb *, struct sk_buff *);
-
-#ifdef CONFIG_IRDA_ULTRA
-int irlmp_connless_data_request(struct lsap_cb *, struct sk_buff *, __u8);
-void irlmp_connless_data_indication(struct lsap_cb *, struct sk_buff *);
-#endif /* CONFIG_IRDA_ULTRA */
-
-void irlmp_status_indication(struct lap_cb *, LINK_STATUS link, LOCK_STATUS lock);
-void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow);
-
-LM_REASON irlmp_convert_lap_reason(LAP_REASON);
-
-static inline __u32 irlmp_get_saddr(const struct lsap_cb *self)
-{
- return (self && self->lap) ? self->lap->saddr : 0;
-}
-
-static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
-{
- return (self && self->lap) ? self->lap->daddr : 0;
-}
-
-const char *irlmp_reason_str(LM_REASON reason);
-
-extern int sysctl_discovery_timeout;
-extern int sysctl_discovery_slots;
-extern int sysctl_discovery;
-extern int sysctl_lap_keepalive_time; /* in ms, default is LM_IDLE_TIMEOUT */
-extern struct irlmp_cb *irlmp;
-
-/* Check if LAP queue is full.
- * Used by IrTTP for flow control, see comments in irlap.h - Jean II */
-static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
-{
- if (self == NULL)
- return 0;
- if (self->lap == NULL)
- return 0;
- if (self->lap->irlap == NULL)
- return 0;
-
- return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD;
-}
-
-/* After doing an irlmp_dup(), this gets one of the two sockets back into
- * a state where it's waiting for incoming connections.
- * Note : this can be used *only* if the socket is not yet connected
- * (i.e. NO irlmp_connect_response() done on this socket).
- * - Jean II */
-static inline void irlmp_listen(struct lsap_cb *self)
-{
- self->dlsap_sel = LSAP_ANY;
- self->lap = NULL;
- self->lsap_state = LSAP_DISCONNECTED;
- /* Started when we received the LM_CONNECT_INDICATION */
- del_timer(&self->watchdog_timer);
-}
-
-#endif
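
A small usage sketch for the flow-control helper irlmp_lap_tx_queue_full() declared above: clients poll it before queueing more data so that IrLAP never holds more than LAP_HIGH_THRESHOLD frames. try_send() and the -EAGAIN convention are illustrative assumptions, not part of the removed API.

/* Illustrative sketch: back off when the underlying LAP queue is already full. */
static int try_send(struct lsap_cb *lsap, struct sk_buff *skb)
{
	if (irlmp_lap_tx_queue_full(lsap))
		return -EAGAIN;	/* let the caller retry after flow-on */
	return irlmp_data_request(lsap, skb);
}
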
diff --git a/include/net/irda/irlmp_event.h b/include/net/irda/irlmp_event.h
deleted file mode 100644
index 9e4ec17a7449..000000000000
--- a/include/net/irda/irlmp_event.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlmp_event.h
- * Version: 0.1
- * Description: IrDA-LMP event handling
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Thu Jul 8 12:18:54 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRLMP_EVENT_H
-#define IRLMP_EVENT_H
-
-/* A few forward declarations (to make compiler happy) */
-struct irlmp_cb;
-struct lsap_cb;
-struct lap_cb;
-struct discovery_t;
-
-/* LAP states */
-typedef enum {
- /* IrLAP connection control states */
- LAP_STANDBY, /* No LAP connection */
- LAP_U_CONNECT, /* Starting LAP connection */
- LAP_ACTIVE, /* LAP connection is active */
-} IRLMP_STATE;
-
-/* LSAP connection control states */
-typedef enum {
- LSAP_DISCONNECTED, /* No LSAP connection */
- LSAP_CONNECT, /* Connect indication from peer */
- LSAP_CONNECT_PEND, /* Connect request from service user */
- LSAP_DATA_TRANSFER_READY, /* LSAP connection established */
- LSAP_SETUP, /* Trying to set up LSAP connection */
- LSAP_SETUP_PEND, /* Request to start LAP connection */
-} LSAP_STATE;
-
-typedef enum {
- /* LSAP events */
- LM_CONNECT_REQUEST,
- LM_CONNECT_CONFIRM,
- LM_CONNECT_RESPONSE,
- LM_CONNECT_INDICATION,
-
- LM_DISCONNECT_INDICATION,
- LM_DISCONNECT_REQUEST,
-
- LM_DATA_REQUEST,
- LM_UDATA_REQUEST,
- LM_DATA_INDICATION,
- LM_UDATA_INDICATION,
-
- LM_WATCHDOG_TIMEOUT,
-
- /* IrLAP events */
- LM_LAP_CONNECT_REQUEST,
- LM_LAP_CONNECT_INDICATION,
- LM_LAP_CONNECT_CONFIRM,
- LM_LAP_DISCONNECT_INDICATION,
- LM_LAP_DISCONNECT_REQUEST,
- LM_LAP_DISCOVERY_REQUEST,
- LM_LAP_DISCOVERY_CONFIRM,
- LM_LAP_IDLE_TIMEOUT,
-} IRLMP_EVENT;
-
-extern const char *const irlmp_state[];
-extern const char *const irlsap_state[];
-
-void irlmp_watchdog_timer_expired(void *data);
-void irlmp_discovery_timer_expired(void *data);
-void irlmp_idle_timer_expired(void *data);
-
-void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb);
-int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb);
-
-#endif /* IRLMP_EVENT_H */
-
-
-
-
diff --git a/include/net/irda/irlmp_frame.h b/include/net/irda/irlmp_frame.h
deleted file mode 100644
index 1906eb71422e..000000000000
--- a/include/net/irda/irlmp_frame.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlmp_frame.h
- * Version: 0.9
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Aug 19 02:09:59 1997
- * Modified at: Fri Dec 10 13:21:53 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997, 1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRMLP_FRAME_H
-#define IRMLP_FRAME_H
-
-#include <linux/skbuff.h>
-
-#include <net/irda/discovery.h>
-
-/* IrLMP frame opcodes */
-#define CONNECT_CMD 0x01
-#define CONNECT_CNF 0x81
-#define DISCONNECT 0x02
-#define ACCESSMODE_CMD 0x03
-#define ACCESSMODE_CNF 0x83
-
-#define CONTROL_BIT 0x80
-
-void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
- int expedited, struct sk_buff *skb);
-void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
- __u8 opcode, struct sk_buff *skb);
-void irlmp_link_data_indication(struct lap_cb *, struct sk_buff *,
- int unreliable);
-#ifdef CONFIG_IRDA_ULTRA
-void irlmp_link_unitdata_indication(struct lap_cb *, struct sk_buff *);
-#endif /* CONFIG_IRDA_ULTRA */
-
-void irlmp_link_connect_indication(struct lap_cb *, __u32 saddr, __u32 daddr,
- struct qos_info *qos, struct sk_buff *skb);
-void irlmp_link_connect_request(__u32 daddr);
-void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
- struct sk_buff *skb);
-void irlmp_link_disconnect_indication(struct lap_cb *, struct irlap_cb *,
- LAP_REASON reason, struct sk_buff *);
-void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log);
-void irlmp_link_discovery_indication(struct lap_cb *, discovery_t *discovery);
-
-#endif
diff --git a/include/net/irda/irmod.h b/include/net/irda/irmod.h
deleted file mode 100644
index 86f0dbb8ee5d..000000000000
--- a/include/net/irda/irmod.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*********************************************************************
- *
- * Filename: irmod.h
- * Version: 0.3
- * Description: IrDA module and utilities functions
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Dec 15 13:58:52 1997
- * Modified at: Fri Jan 28 13:15:24 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charg.
- *
- ********************************************************************/
-
-#ifndef IRMOD_H
-#define IRMOD_H
-
-/* Misc status information */
-typedef enum {
- STATUS_OK,
- STATUS_ABORTED,
- STATUS_NO_ACTIVITY,
- STATUS_NOISY,
- STATUS_REMOTE,
-} LINK_STATUS;
-
-typedef enum {
- LOCK_NO_CHANGE,
- LOCK_LOCKED,
- LOCK_UNLOCKED,
-} LOCK_STATUS;
-
-typedef enum { FLOW_STOP, FLOW_START } LOCAL_FLOW;
-
-/*
- * IrLMP disconnect reasons. The order is very important, since they
- * correspond to disconnect reasons sent in IrLMP disconnect frames, so
- * please do not touch :-)
- */
-typedef enum {
- LM_USER_REQUEST = 1, /* User request */
- LM_LAP_DISCONNECT, /* Unexpected IrLAP disconnect */
- LM_CONNECT_FAILURE, /* Failed to establish IrLAP connection */
- LM_LAP_RESET, /* IrLAP reset */
- LM_INIT_DISCONNECT, /* Link Management initiated disconnect */
- LM_LSAP_NOTCONN, /* Data delivered on unconnected LSAP */
- LM_NON_RESP_CLIENT, /* Non responsive LM-MUX client */
- LM_NO_AVAIL_CLIENT, /* No available LM-MUX client */
- LM_CONN_HALF_OPEN, /* Connection is half open */
- LM_BAD_SOURCE_ADDR, /* Illegal source address (i.e. 0x00) */
-} LM_REASON;
-#define LM_UNKNOWN 0xff /* Unspecified disconnect reason */
-
-/* A few forward declarations (to make compiler happy) */
-struct qos_info; /* in <net/irda/qos.h> */
-
-/*
- * Notify structure used between transport and link management layers
- */
-typedef struct {
- int (*data_indication)(void *priv, void *sap, struct sk_buff *skb);
- int (*udata_indication)(void *priv, void *sap, struct sk_buff *skb);
- void (*connect_confirm)(void *instance, void *sap,
- struct qos_info *qos, __u32 max_sdu_size,
- __u8 max_header_size, struct sk_buff *skb);
- void (*connect_indication)(void *instance, void *sap,
- struct qos_info *qos, __u32 max_sdu_size,
- __u8 max_header_size, struct sk_buff *skb);
- void (*disconnect_indication)(void *instance, void *sap,
- LM_REASON reason, struct sk_buff *);
- void (*flow_indication)(void *instance, void *sap, LOCAL_FLOW flow);
- void (*status_indication)(void *instance,
- LINK_STATUS link, LOCK_STATUS lock);
- void *instance; /* Layer instance pointer */
- char name[16]; /* Name of layer */
-} notify_t;
-
-#define NOTIFY_MAX_NAME 16
-
-/* Zero the notify structure */
-void irda_notify_init(notify_t *notify);
-
-/* Locking wrapper - Note the inverted logic on irda_lock().
- * Those functions basically return false if the lock is already in the
- * position you want to set it to. - Jean II */
-#define irda_lock(lock) (! test_and_set_bit(0, (void *) (lock)))
-#define irda_unlock(lock) (test_and_clear_bit(0, (void *) (lock)))
-
-#endif /* IRMOD_H */
-
-
-
-
-
-
-
-
-
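
The inverted logic noted above comes from test_and_set_bit() returning the previous bit value, so irda_lock() is true only when the lock was actually acquired. A minimal usage sketch, assuming a caller-owned unsigned long lock word; do_work_locked() is an illustrative name, not part of the removed header.

/* Illustrative only: irda_lock() succeeds (returns non-zero) when the bit
 * was clear and we set it; otherwise somebody else already holds the lock. */
static int do_work_locked(unsigned long *lock_word)
{
	if (!irda_lock(lock_word))
		return -EBUSY;		/* already held elsewhere */
	/* ... critical section ... */
	irda_unlock(lock_word);
	return 0;
}
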
diff --git a/include/net/irda/irqueue.h b/include/net/irda/irqueue.h
deleted file mode 100644
index 37f512bd6733..000000000000
--- a/include/net/irda/irqueue.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*********************************************************************
- *
- * Filename: irqueue.h
- * Version: 0.3
- * Description: General queue implementation
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Tue Jun 9 13:26:50 1998
- * Modified at: Thu Oct 7 13:25:16 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
- * Copyright (c) 1998, Dag Brattli
- * All Rights Reserved.
- *
- * This code is taken from the Vortex Operating System written by Aage
- * Kvalnes and has been ported to Linux and Linux/IR by Dag Brattli
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-
-#ifndef IRDA_QUEUE_H
-#define IRDA_QUEUE_H
-
-#define NAME_SIZE 32
-
-/*
- * Hash types (some flags can be xored)
- * See comments in irqueue.c for which one to use...
- */
-#define HB_NOLOCK 0 /* No concurrent access prevention */
-#define HB_LOCK 1 /* Prevent concurrent write with global lock */
-
-/*
- * Hash defines
- */
-#define HASHBIN_SIZE 8
-#define HASHBIN_MASK 0x7
-
-#ifndef IRDA_ALIGN
-#define IRDA_ALIGN __attribute__((aligned))
-#endif
-
-#define Q_NULL { NULL, NULL, "", 0 }
-
-typedef void (*FREE_FUNC)(void *arg);
-
-struct irda_queue {
- struct irda_queue *q_next;
- struct irda_queue *q_prev;
-
- char q_name[NAME_SIZE];
- long q_hash; /* Must be able to cast a (void *) */
-};
-typedef struct irda_queue irda_queue_t;
-
-typedef struct hashbin_t {
- __u32 magic;
- int hb_type;
- int hb_size;
- spinlock_t hb_spinlock; /* HB_LOCK - Can be used by the user */
-
- irda_queue_t* hb_queue[HASHBIN_SIZE] IRDA_ALIGN;
-
- irda_queue_t* hb_current;
-} hashbin_t;
-
-hashbin_t *hashbin_new(int type);
-int hashbin_delete(hashbin_t* hashbin, FREE_FUNC func);
-int hashbin_clear(hashbin_t* hashbin, FREE_FUNC free_func);
-void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
- const char* name);
-void* hashbin_remove(hashbin_t* hashbin, long hashv, const char* name);
-void* hashbin_remove_first(hashbin_t *hashbin);
-void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry);
-void* hashbin_find(hashbin_t* hashbin, long hashv, const char* name);
-void* hashbin_lock_find(hashbin_t* hashbin, long hashv, const char* name);
-void* hashbin_find_next(hashbin_t* hashbin, long hashv, const char* name,
- void ** pnext);
-irda_queue_t *hashbin_get_first(hashbin_t *hashbin);
-irda_queue_t *hashbin_get_next(hashbin_t *hashbin);
-
-#define HASHBIN_GET_SIZE(hashbin) hashbin->hb_size
-
-#endif
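
A minimal life-cycle sketch for the hashbin API above, in the style of its in-kernel users: entries embed irda_queue_t as their first member so the queue code can cast back and forth. struct my_entry, the 0x1234 key and the use of kzalloc()/kfree() (from linux/slab.h) are illustrative assumptions.

/* Illustrative sketch: create a hashbin, insert, look up, then tear down. */
struct my_entry {
	irda_queue_t q;		/* must be first */
	int payload;
};

static void hashbin_demo(void)
{
	hashbin_t *bin = hashbin_new(HB_LOCK);
	struct my_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!bin || !e)
		return;				/* error handling elided */

	e->payload = 42;
	hashbin_insert(bin, &e->q, 0x1234, NULL);	/* keyed by hash value only */

	e = hashbin_find(bin, 0x1234, NULL);		/* back to the same entry */

	hashbin_delete(bin, (FREE_FUNC) kfree);		/* frees remaining entries */
}
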
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h
deleted file mode 100644
index 98682d4bae8f..000000000000
--- a/include/net/irda/irttp.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*********************************************************************
- *
- * Filename: irttp.h
- * Version: 1.0
- * Description: Tiny Transport Protocol (TTP) definitions
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:31 1997
- * Modified at: Sun Dec 12 13:09:07 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef IRTTP_H
-#define IRTTP_H
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/irlmp.h> /* struct lsap_cb */
-#include <net/irda/qos.h> /* struct qos_info */
-#include <net/irda/irqueue.h>
-
-#define TTP_MAX_CONNECTIONS LM_MAX_CONNECTIONS
-#define TTP_HEADER 1
-#define TTP_MAX_HEADER (TTP_HEADER + LMP_MAX_HEADER)
-#define TTP_SAR_HEADER 5
-#define TTP_PARAMETERS 0x80
-#define TTP_MORE 0x80
-
-/* Transmission queue sizes */
-/* Worst case scenario, two window of data - Jean II */
-#define TTP_TX_MAX_QUEUE 14
-/* We need to keep at least 5 frames to make sure that we can refill
- * the LAP layer appropriately. LAP keeps only two buffers, and we need
- * to have 7 to make a full window - Jean II */
-#define TTP_TX_LOW_THRESHOLD 5
-/* Most clients are synchronous with respect to flow control, so we can
- * keep a low number of Tx buffers in TTP - Jean II */
-#define TTP_TX_HIGH_THRESHOLD 7
-
-/* Receive queue sizes */
-/* Minimum credit that the peer should hold.
- * If the peer has fewer credits than 9 frames, we will explicitly send
- * it some credits (through irttp_give_credit() and a specific frame).
- * Note that when we give credits it's likely that they won't be sent in
- * this LAP window, but in the next one. So, we make sure that the peer
- * has something to send while waiting for credits (one LAP window == 7
- * + 1 frames while it processes the credits). - Jean II */
-#define TTP_RX_MIN_CREDIT 8
-/* This is the default maximum number of credits held by the peer, so the
- * default maximum number of frames it can send us before needing a flow
- * control answer from us (this may be negotiated differently at TSAP setup).
- * We want to minimise the number of times we have to explicitly send some
- * credit to the peer, hoping we can piggyback it on the return data. In
- * particular, it doesn't make sense for us to send credit more than once
- * per LAP window.
- * Moreover, giving credits has some latency, so we need strictly more than
- * a LAP window, otherwise we may already have credits in our Tx queue.
- * But on the other hand, we don't want to keep too many Rx buffers here
- * before starting to flow control the other end, so make it exactly one
- * LAP window + 1 + MIN_CREDITS. - Jean II */
-#define TTP_RX_DEFAULT_CREDIT 16
-/* Maximum number of credits we can allow the peer to have, and therefore
- * maximum Rx queue size.
- * Note that we try to deliver packets to the higher layer every time we
- * receive something, so in normal mode the Rx queue will never contain
- * more than one or two packets. - Jean II */
-#define TTP_RX_MAX_CREDIT 21
-
-/* What clients should use when calling ttp_open_tsap() */
-#define DEFAULT_INITIAL_CREDIT TTP_RX_DEFAULT_CREDIT
-
-/* Some priorities for disconnect requests */
-#define P_NORMAL 0
-#define P_HIGH 1
-
-#define TTP_SAR_DISABLE 0
-#define TTP_SAR_UNBOUND 0xffffffff
-
-/* Parameters */
-#define TTP_MAX_SDU_SIZE 0x01
-
-/*
- * This structure contains all data associated with one instance of a TTP
- * connection.
- */
-struct tsap_cb {
- irda_queue_t q; /* Must be first */
- magic_t magic; /* Just in case */
-
- __u8 stsap_sel; /* Source TSAP */
- __u8 dtsap_sel; /* Destination TSAP */
-
- struct lsap_cb *lsap; /* Corresponding LSAP to this TSAP */
-
- __u8 connected; /* TSAP connected */
-
- __u8 initial_credit; /* Initial credit to give peer */
-
- int avail_credit; /* Available credit to return to peer */
- int remote_credit; /* Credit held by peer TTP entity */
- int send_credit; /* Credit held by local TTP entity */
-
- struct sk_buff_head tx_queue; /* Frames to be transmitted */
- struct sk_buff_head rx_queue; /* Received frames */
- struct sk_buff_head rx_fragments;
- int tx_queue_lock;
- int rx_queue_lock;
- spinlock_t lock;
-
- notify_t notify; /* Callbacks to client layer */
-
- struct net_device_stats stats;
- struct timer_list todo_timer;
-
- __u32 max_seg_size; /* Max data that fit into an IrLAP frame */
- __u8 max_header_size;
-
- int rx_sdu_busy; /* RxSdu.busy */
- __u32 rx_sdu_size; /* Current size of a partially received frame */
- __u32 rx_max_sdu_size; /* Max receive user data size */
-
- int tx_sdu_busy; /* TxSdu.busy */
- __u32 tx_max_sdu_size; /* Max transmit user data size */
-
- int close_pend; /* Close, but disconnect_pend */
- unsigned long disconnect_pend; /* Disconnect, but still data to send */
- struct sk_buff *disconnect_skb;
-};
-
-struct irttp_cb {
- magic_t magic;
- hashbin_t *tsaps;
-};
-
-int irttp_init(void);
-void irttp_cleanup(void);
-
-struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify);
-int irttp_close_tsap(struct tsap_cb *self);
-
-int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb);
-int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb);
-
-int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
- __u32 saddr, __u32 daddr,
- struct qos_info *qos, __u32 max_sdu_size,
- struct sk_buff *userdata);
-int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
- struct sk_buff *userdata);
-int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *skb,
- int priority);
-void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow);
-struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance);
-
-static inline __u32 irttp_get_saddr(struct tsap_cb *self)
-{
- return irlmp_get_saddr(self->lsap);
-}
-
-static inline __u32 irttp_get_daddr(struct tsap_cb *self)
-{
- return irlmp_get_daddr(self->lsap);
-}
-
-static inline __u32 irttp_get_max_seg_size(struct tsap_cb *self)
-{
- return self->max_seg_size;
-}
-
-/* After doing an irttp_dup(), this gets one of the two sockets back into
- * a state where it's waiting for incoming connections.
- * Note : this can be used *only* if the socket is not yet connected
- * (i.e. NO irttp_connect_response() done on this socket).
- * - Jean II */
-static inline void irttp_listen(struct tsap_cb *self)
-{
- irlmp_listen(self->lsap);
- self->dtsap_sel = LSAP_ANY;
-}
-
-/* Return TRUE if the node is in primary mode (i.e. master)
- * - Jean II */
-static inline int irttp_is_primary(struct tsap_cb *self)
-{
- if ((self == NULL) ||
- (self->lsap == NULL) ||
- (self->lsap->lap == NULL) ||
- (self->lsap->lap->irlap == NULL))
- return -2;
- return irlap_is_primary(self->lsap->lap->irlap);
-}
-
-#endif /* IRTTP_H */
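
The credit sizing described in the comments above can be checked directly: the default credit is one full LAP window (7 frames, per the irlap.h comments) plus 1, plus the minimum credit. A compile-time check, assuming C11 _Static_assert and the 7-frame window figure (illustrative only, not part of the removed header):

/* Illustrative only: TTP_RX_DEFAULT_CREDIT == one LAP window + 1 + minimum credit. */
#define EXAMPLE_LAP_WINDOW_FRAMES 7	/* assumed full LAP window */
_Static_assert(EXAMPLE_LAP_WINDOW_FRAMES + 1 + TTP_RX_MIN_CREDIT == TTP_RX_DEFAULT_CREDIT,
	       "default Rx credit should cover one LAP window + 1 + minimum credit");
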
diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h
deleted file mode 100644
index 2d9cd0007cba..000000000000
--- a/include/net/irda/parameters.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*********************************************************************
- *
- * Filename: parameters.h
- * Version: 1.0
- * Description: A more general way to handle (pi,pl,pv) parameters
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Jun 7 08:47:28 1999
- * Modified at: Sun Jan 30 14:05:14 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * Michel Dänzer <daenzer@debian.org>, 10/2001
- * - simplify irda_pv_t to avoid endianness issues
- *
- ********************************************************************/
-
-#ifndef IRDA_PARAMS_H
-#define IRDA_PARAMS_H
-
-/*
- * The currently supported types. Beware not to change the sequence, since
- * there is a good reason why the sized integers have a value equal to their size
- */
-typedef enum {
- PV_INTEGER, /* Integer of any (pl) length */
- PV_INT_8_BITS, /* Integer of 8 bits in length */
- PV_INT_16_BITS, /* Integer of 16 bits in length */
- PV_STRING, /* \0 terminated string */
- PV_INT_32_BITS, /* Integer of 32 bits in length */
- PV_OCT_SEQ, /* Octet sequence */
- PV_NO_VALUE /* Does not contain any value (pl=0) */
-} PV_TYPE;
-
-/* Bit 7 of type field */
-#define PV_BIG_ENDIAN 0x80
-#define PV_LITTLE_ENDIAN 0x00
-#define PV_MASK 0x7f /* To mask away endian bit */
-
-#define PV_PUT 0
-#define PV_GET 1
-
-typedef union {
- char *c;
- __u32 i;
- __u32 *ip;
-} irda_pv_t;
-
-typedef struct {
- __u8 pi;
- __u8 pl;
- irda_pv_t pv;
-} irda_param_t;
-
-typedef int (*PI_HANDLER)(void *self, irda_param_t *param, int get);
-typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi,
- PV_TYPE type, PI_HANDLER func);
-
-typedef struct {
- const PI_HANDLER func; /* Handler for this parameter identifier */
- PV_TYPE type; /* Data type for this parameter */
-} pi_minor_info_t;
-
-typedef struct {
- const pi_minor_info_t *pi_minor_call_table;
- int len;
-} pi_major_info_t;
-
-typedef struct {
- const pi_major_info_t *tables;
- int len;
- __u8 pi_mask;
- int pi_major_offset;
-} pi_param_info_t;
-
-int irda_param_pack(__u8 *buf, char *fmt, ...);
-
-int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
- pi_param_info_t *info);
-int irda_param_extract_all(void *self, __u8 *buf, int len,
- pi_param_info_t *info);
-
-#define irda_param_insert_byte(buf,pi,pv) irda_param_pack(buf,"bbb",pi,1,pv)
-
-#endif /* IRDA_PARAMS_H */
-
diff --git a/include/net/irda/qos.h b/include/net/irda/qos.h
deleted file mode 100644
index 05a5a249956f..000000000000
--- a/include/net/irda/qos.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*********************************************************************
- *
- * Filename: qos.h
- * Version: 1.0
- * Description: Quality of Service definitions
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Fri Sep 19 23:21:09 1997
- * Modified at: Thu Dec 2 13:51:54 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- ********************************************************************/
-
-#ifndef IRDA_QOS_H
-#define IRDA_QOS_H
-
-#include <linux/skbuff.h>
-
-#include <net/irda/parameters.h>
-
-#define PI_BAUD_RATE 0x01
-#define PI_MAX_TURN_TIME 0x82
-#define PI_DATA_SIZE 0x83
-#define PI_WINDOW_SIZE 0x84
-#define PI_ADD_BOFS 0x85
-#define PI_MIN_TURN_TIME 0x86
-#define PI_LINK_DISC 0x08
-
-#define IR_115200_MAX 0x3f
-
-/* Baud rates (first byte) */
-#define IR_2400 0x01
-#define IR_9600 0x02
-#define IR_19200 0x04
-#define IR_38400 0x08
-#define IR_57600 0x10
-#define IR_115200 0x20
-#define IR_576000 0x40
-#define IR_1152000 0x80
-
-/* Baud rates (second byte) */
-#define IR_4000000 0x01
-#define IR_16000000 0x02
-
-/* Quality of Service information */
-typedef struct {
- __u32 value;
- __u16 bits; /* LSB is first byte, MSB is second byte */
-} qos_value_t;
-
-struct qos_info {
- magic_t magic;
-
- qos_value_t baud_rate; /* IR_115200 | ... */
- qos_value_t max_turn_time;
- qos_value_t data_size;
- qos_value_t window_size;
- qos_value_t additional_bofs;
- qos_value_t min_turn_time;
- qos_value_t link_disc_time;
-
- qos_value_t power;
-};
-
-extern int sysctl_max_baud_rate;
-extern int sysctl_max_inactive_time;
-
-void irda_init_max_qos_capabilies(struct qos_info *qos);
-void irda_qos_compute_intersection(struct qos_info *, struct qos_info *);
-
-__u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time);
-
-void irda_qos_bits_to_value(struct qos_info *qos);
-
-/* So simple, how could we not inline those two?
- * Note : one byte is 10 bits if you include start and stop bits
- * Jean II */
-#define irlap_min_turn_time_in_bytes(speed, min_turn_time) ( \
- speed * min_turn_time / 10000000 \
-)
-#define irlap_xbofs_in_usec(speed, xbofs) ( \
- xbofs * 10000000 / speed \
-)
-
-#endif
-
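
A worked instance of the turnaround macro above, assuming speed is in bits per second and min_turn_time in microseconds (which is what the /10000000 divisor and the 10-bits-per-byte note imply); the 115200/5000 figures are illustrative, not part of the removed header.

/* Illustrative only: 115200 bit/s with a 5000 us minimum turnaround time.
 * 115200 * 5000 / 10000000 = 57 bytes (one byte = 10 bits incl. start/stop). */
static const unsigned int example_turnaround_bytes =
	irlap_min_turn_time_in_bytes(115200, 5000);	/* == 57 */
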
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
deleted file mode 100644
index d784f242cf7b..000000000000
--- a/include/net/irda/timer.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*********************************************************************
- *
- * Filename: timer.h
- * Version:
- * Description:
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Thu Oct 7 12:25:24 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997, 1998-1999 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- * Copyright (c) 2000-2002 Jean Tourrilhes <jt@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef TIMER_H
-#define TIMER_H
-
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-
-#include <asm/param.h> /* for HZ */
-
-#include <net/irda/irda.h>
-
-/* A few forward declarations (to make compiler happy) */
-struct irlmp_cb;
-struct irlap_cb;
-struct lsap_cb;
-struct lap_cb;
-
-/*
- * Timeout definitions, some defined in IrLAP 6.13.5 - p. 92
- */
-#define POLL_TIMEOUT (450*HZ/1000) /* Must never exceed 500 ms */
-#define FINAL_TIMEOUT (500*HZ/1000) /* Must never exceed 500 ms */
-
-/*
- * Normally twice of p-timer. Note 3, IrLAP 6.3.11.2 - p. 60 suggests
- * at least twice duration of the P-timer.
- */
-#define WD_TIMEOUT (POLL_TIMEOUT*2)
-
-#define MEDIABUSY_TIMEOUT (500*HZ/1000) /* 500 msec */
-#define SMALLBUSY_TIMEOUT (100*HZ/1000) /* 100 msec - IrLAP 6.13.4 */
-
-/*
- * Slot timer must never exceed 85 ms, and must always be at least 25 ms,
- * suggested to 75-85 msec by IrDA lite. This doesn't work with a lot of
- * devices, and other stackes uses a lot more, so it's best we do it as well
- * (Note : this is the default value and sysctl overrides it - Jean II)
- */
-#define SLOT_TIMEOUT (90*HZ/1000)
-
-/*
- * The latest discovery frame (XID) is longer due to the extra discovery
- * information (hints, device name...). This is its extra length.
- * We use that when setting the query timeout. Jean II
- */
-#define XIDEXTRA_TIMEOUT (34*HZ/1000) /* 34 msec */
-
-#define WATCHDOG_TIMEOUT (20*HZ) /* 20 sec */
-
-typedef void (*TIMER_CALLBACK)(void *);
-
-static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
- void* data, TIMER_CALLBACK callback)
-{
- ptimer->function = (void (*)(unsigned long)) callback;
- ptimer->data = (unsigned long) data;
-
- /* Set new value for timer (update or add timer).
- * We use mod_timer() because it's more efficient and also
- * safer with respect to race conditions - Jean II */
- mod_timer(ptimer, jiffies + timeout);
-}
-
-
-void irlap_start_slot_timer(struct irlap_cb *self, int timeout);
-void irlap_start_query_timer(struct irlap_cb *self, int S, int s);
-void irlap_start_final_timer(struct irlap_cb *self, int timeout);
-void irlap_start_wd_timer(struct irlap_cb *self, int timeout);
-void irlap_start_backoff_timer(struct irlap_cb *self, int timeout);
-
-void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout);
-void irlap_stop_mbusy_timer(struct irlap_cb *);
-
-void irlmp_start_watchdog_timer(struct lsap_cb *, int timeout);
-void irlmp_start_discovery_timer(struct irlmp_cb *, int timeout);
-void irlmp_start_idle_timer(struct lap_cb *, int timeout);
-void irlmp_stop_idle_timer(struct lap_cb *self);
-
-#endif
-
diff --git a/include/net/irda/wrapper.h b/include/net/irda/wrapper.h
deleted file mode 100644
index eef53ebe3d76..000000000000
--- a/include/net/irda/wrapper.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*********************************************************************
- *
- * Filename: wrapper.h
- * Version: 1.2
- * Description: IrDA SIR async wrapper layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Tue Jan 11 12:37:29 2000
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#ifndef WRAPPER_H
-#define WRAPPER_H
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-#include <net/irda/irda_device.h> /* iobuff_t */
-
-#define BOF 0xc0 /* Beginning of frame */
-#define XBOF 0xff
-#define EOF 0xc1 /* End of frame */
-#define CE 0x7d /* Control escape */
-
-#define STA BOF /* Start flag */
-#define STO EOF /* End flag */
-
-#define IRDA_TRANS 0x20 /* Asynchronous transparency modifier */
-
-/* States for receiving a frame in async mode */
-enum {
- OUTSIDE_FRAME,
- BEGIN_FRAME,
- LINK_ESCAPE,
- INSIDE_FRAME
-};
-
-/* Proto definitions */
-int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize);
-void async_unwrap_char(struct net_device *dev, struct net_device_stats *stats,
- iobuff_t *buf, __u8 byte);
-
-#endif
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b2b5419467cc..f8149ca192b4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5499,6 +5499,21 @@ static inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
ieee80211_manage_rx_ba_offl(vif, addr, tid + IEEE80211_NUM_TIDS);
}
+/**
+ * ieee80211_rx_ba_timer_expired - stop a Rx BA session due to timeout
+ *
+ * Some device drivers do not offload AddBa/DelBa negotiation, but handle rx
+ * buffer reordering internally, and therefore also handle the session timer.
+ *
+ * Trigger the timeout flow, which sends a DelBa.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
+ * @addr: station mac address
+ * @tid: the rx tid
+ */
+void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif,
+ const u8 *addr, unsigned int tid);
+
/* Rate control API */
/**
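
A minimal driver-side sketch of the new export; struct my_ba_session and its fields are hypothetical driver state, and only ieee80211_rx_ba_timer_expired() itself comes from this header:

#include <net/mac80211.h>

/* Hypothetical per-session state kept by a driver that reorders Rx
 * buffers itself and therefore also owns the BA session timer. */
struct my_ba_session {
	struct ieee80211_vif *vif;
	u8 sta_addr[ETH_ALEN];
	u8 tid;
};

/* Called from the driver's own reorder-timeout handler: no frames were
 * received within the negotiated timeout, so let mac80211 run the
 * timeout flow and transmit the DelBa. */
static void my_rx_ba_timeout(struct my_ba_session *ba)
{
	ieee80211_rx_ba_timer_expired(ba->vif, ba->sta_addr, ba->tid);
}
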
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 68680baac0fd..fdc60ff2511d 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -28,12 +28,24 @@ struct ncsi_dev {
};
#ifdef CONFIG_NET_NCSI
+int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd));
int ncsi_start_dev(struct ncsi_dev *nd);
void ncsi_stop_dev(struct ncsi_dev *nd);
void ncsi_unregister_dev(struct ncsi_dev *nd);
#else /* !CONFIG_NET_NCSI */
+static inline int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ return -EINVAL;
+}
+
+static inline int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ return -EINVAL;
+}
+
static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd))
{
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index afc39e3a3f7c..9816df225af3 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -156,7 +156,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
-};
+} __randomize_layout;
struct neigh_ops {
int family;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 31a2b51bef2c..57faa375eab9 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -88,6 +88,7 @@ struct net {
/* core fib_rules */
struct list_head rules_ops;
+ struct list_head fib_notifier_ops; /* protected by net_mutex */
struct net_device *loopback_dev; /* The loopback */
struct netns_core core;
@@ -148,7 +149,7 @@ struct net {
#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
-};
+} __randomize_layout;
#include <linux/seq_file_net.h>
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 48407569585d..fdc9c64a1c94 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -44,12 +44,6 @@ union nf_conntrack_expect_proto {
#include <linux/types.h>
#include <linux/skbuff.h>
-#ifdef CONFIG_NETFILTER_DEBUG
-#define NF_CT_ASSERT(x) WARN_ON(!(x))
-#else
-#define NF_CT_ASSERT(x)
-#endif
-
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -159,7 +153,7 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
/* decrement reference count on a conntrack */
static inline void nf_ct_put(struct nf_conn *ct)
{
- NF_CT_ASSERT(ct);
+ WARN_ON(!ct);
nf_conntrack_put(&ct->ct_general);
}
@@ -224,6 +218,9 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
+/* Mark all unconfirmed conntrack entries as dying */
+void nf_ct_unconfirmed_destroy(struct net *);
+
/* Iterate over all conntracks: if iter returns true, it's deleted. */
void nf_ct_iterate_cleanup_net(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 2ba54feaccd8..818def011110 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -107,6 +107,11 @@ void nf_ct_remove_expectations(struct nf_conn *ct);
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp);
+void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data);
+void nf_ct_expect_iterate_net(struct net *net,
+ bool (*iter)(struct nf_conntrack_expect *e, void *data),
+ void *data, u32 portid, int report);
+
/* Allocate space for an expectation: this is mandatory before calling
nf_ct_expect_related. You will have to call put afterwards. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me);
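
A sketch of the new expectation iterator; it is assumed here to follow the same return-true-to-remove convention as nf_ct_iterate_cleanup_net() above, and the cleanup call site is hypothetical:

#include <net/netfilter/nf_conntrack_expect.h>

/* Match every expectation: with the assumed convention, returning true
 * asks the core to remove it. */
static bool my_expect_all(struct nf_conntrack_expect *exp, void *data)
{
	return true;
}

static void my_flush_expectations(void)
{
	/* e.g. on module unload, drop all outstanding expectations */
	nf_ct_expect_iterate_destroy(my_expect_all, NULL);
}
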
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 6d14b36e3a49..6269deecbee7 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -20,8 +20,8 @@ struct nf_conntrack_l3proto {
/* L3 Protocol Family number. ex) PF_INET */
u_int16_t l3proto;
- /* Protocol name */
- const char *name;
+ /* size of tuple nlattr, fills a hole */
+ u16 nla_size;
/*
* Try to fill in the third arg: nhoff is offset of l3 proto
@@ -37,10 +37,6 @@ struct nf_conntrack_l3proto {
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
- /* Print out the per-protocol part of the tuple. */
- void (*print_tuple)(struct seq_file *s,
- const struct nf_conntrack_tuple *);
-
/*
* Called before tracking.
* *dataoff: offset of protocol header (TCP, UDP,...) in skb
@@ -49,23 +45,17 @@ struct nf_conntrack_l3proto {
int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
unsigned int *dataoff, u_int8_t *protonum);
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
int (*tuple_to_nlattr)(struct sk_buff *skb,
const struct nf_conntrack_tuple *t);
-
- /* Called when netns wants to use connection tracking */
- int (*net_ns_get)(struct net *);
- void (*net_ns_put)(struct net *);
-
- /*
- * Calculate size of tuple nlattr
- */
- int (*nlattr_tuple_size)(void);
-
int (*nlattr_to_tuple)(struct nlattr *tb[],
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
+#endif
- size_t nla_size;
+ /* Called when netns wants to use connection tracking */
+ int (*net_ns_get)(struct net *);
+ void (*net_ns_put)(struct net *);
/* Module (if any) which this is connected to. */
struct module *me;
@@ -73,26 +63,11 @@ struct nf_conntrack_l3proto {
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
-#ifdef CONFIG_SYSCTL
-/* Protocol pernet registration. */
-int nf_ct_l3proto_pernet_register(struct net *net,
- struct nf_conntrack_l3proto *proto);
-#else
-static inline int nf_ct_l3proto_pernet_register(struct net *n,
- struct nf_conntrack_l3proto *p)
-{
- return 0;
-}
-#endif
-
-void nf_ct_l3proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l3proto *proto);
-
/* Protocol global registration. */
-int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
-void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
-struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
+const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
/* Existing built-in protocols */
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7032e044bbe2..738a0307a96b 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -43,7 +43,6 @@ struct nf_conntrack_l4proto {
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum,
unsigned int *timeouts);
/* Called when a new connection for this protocol found;
@@ -61,13 +60,6 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
- /* Print out the per-protocol part of the tuple. Return like seq_* */
- void (*print_tuple)(struct seq_file *s,
- const struct nf_conntrack_tuple *);
-
- /* Print out the private part of the conntrack. */
- void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
-
/* Return the array of timeouts for this protocol. */
unsigned int *(*get_timeouts)(struct net *net);
@@ -92,15 +84,19 @@ struct nf_conntrack_l4proto {
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
- size_t obj_size;
int (*nlattr_to_obj)(struct nlattr *tb[],
struct net *net, void *data);
int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
- unsigned int nlattr_max;
+ u16 obj_size;
+ u16 nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
#endif
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
+ /* Print out the private part of the conntrack. */
+ void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
+#endif
unsigned int *net_id;
/* Init l4proto pernet data */
int (*init_net)(struct net *net, u_int16_t proto);
@@ -108,9 +104,6 @@ struct nf_conntrack_l4proto {
/* Return the per-net protocol part. */
struct nf_proto_net *(*get_net_proto)(struct net *net);
- /* Protocol name */
- const char *name;
-
/* Module (if any) which this is connected to. */
struct module *me;
};
@@ -120,28 +113,28 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
#define MAX_NF_CT_PROTO 256
-struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
u_int8_t l4proto);
-struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
u_int8_t l4proto);
-void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
+void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
/* Protocol pernet registration. */
int nf_ct_l4proto_pernet_register_one(struct net *net,
- struct nf_conntrack_l4proto *proto);
+ const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_pernet_unregister_one(struct net *net,
- struct nf_conntrack_l4proto *proto);
+ const struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_pernet_register(struct net *net,
- struct nf_conntrack_l4proto *proto[],
+ struct nf_conntrack_l4proto *const proto[],
unsigned int num_proto);
void nf_ct_l4proto_pernet_unregister(struct net *net,
- struct nf_conntrack_l4proto *proto[],
- unsigned int num_proto);
+ struct nf_conntrack_l4proto *const proto[],
+ unsigned int num_proto);
/* Protocol global registration. */
int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
-void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
unsigned int num_proto);
void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d40b89355fdd..483d104fa254 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -16,7 +16,7 @@ struct ctnl_timeout {
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
__u16 l3num;
- struct nf_conntrack_l4proto *l4proto;
+ const struct nf_conntrack_l4proto *l4proto;
char data[0];
};
@@ -68,7 +68,7 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
static inline unsigned int *
nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
- struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_l4proto *l4proto)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 4454719ff849..39468720fc19 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -10,9 +10,9 @@ struct nf_queue_entry {
struct list_head list;
struct sk_buff *skb;
unsigned int id;
+ unsigned int hook_index; /* index in hook_entries->hook[] */
struct nf_hook_state state;
- struct nf_hook_entry *hook;
u16 size; /* sizeof(entry) + saved route keys */
/* extra space to store route keys */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index bd5be0d691d5..0f5b12a4ad09 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -396,7 +396,7 @@ void nft_unregister_set(struct nft_set_type *type);
struct nft_set {
struct list_head list;
struct list_head bindings;
- char name[NFT_SET_MAXNAMELEN];
+ char *name;
u32 ktype;
u32 dtype;
u32 objtype;
@@ -859,7 +859,7 @@ struct nft_chain {
u16 level;
u8 flags:6,
genmask:2;
- char name[NFT_CHAIN_MAXNAMELEN];
+ char *name;
};
enum nft_chain_type {
@@ -957,7 +957,7 @@ struct nft_table {
u32 use;
u16 flags:14,
genmask:2;
- char name[NFT_TABLE_MAXNAMELEN];
+ char *name;
};
enum nft_af_flags {
@@ -1007,21 +1007,21 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
*
* @list: table stateful object list node
* @table: table this object belongs to
- * @type: pointer to object type
- * @data: pointer to object data
* @name: name of this stateful object
* @genmask: generation mask
* @use: number of references to this stateful object
* @data: object data, layout depends on type
+ * @ops: object operations
+ * @data: pointer to object data
*/
struct nft_object {
struct list_head list;
- char name[NFT_OBJ_MAXNAMELEN];
+ char *name;
struct nft_table *table;
u32 genmask:2,
use:30;
/* runtime data below here */
- const struct nft_object_type *type ____cacheline_aligned;
+ const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
__attribute__((aligned(__alignof__(u64))));
};
@@ -1044,27 +1044,39 @@ void nft_obj_notify(struct net *net, struct nft_table *table,
/**
* struct nft_object_type - stateful object type
*
- * @eval: stateful object evaluation function
+ * @select_ops: function to select nft_object_ops
+ * @ops: default ops, used when no select_ops function is present
* @list: list node in list of object types
* @type: stateful object numeric type
- * @size: stateful object size
* @owner: module owner
* @maxattr: maximum netlink attribute
* @policy: netlink attribute policy
+ */
+struct nft_object_type {
+ const struct nft_object_ops *(*select_ops)(const struct nft_ctx *,
+ const struct nlattr * const tb[]);
+ const struct nft_object_ops *ops;
+ struct list_head list;
+ u32 type;
+ unsigned int maxattr;
+ struct module *owner;
+ const struct nla_policy *policy;
+};
+
+/**
+ * struct nft_object_ops - stateful object operations
+ *
+ * @eval: stateful object evaluation function
+ * @size: stateful object size
* @init: initialize object from netlink attributes
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
*/
-struct nft_object_type {
+struct nft_object_ops {
void (*eval)(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt);
- struct list_head list;
- u32 type;
unsigned int size;
- unsigned int maxattr;
- struct module *owner;
- const struct nla_policy *policy;
int (*init)(const struct nft_ctx *ctx,
const struct nlattr *const tb[],
struct nft_object *obj);
@@ -1072,6 +1084,7 @@ struct nft_object_type {
int (*dump)(struct sk_buff *skb,
struct nft_object *obj,
bool reset);
+ const struct nft_object_type *type;
};
int nft_register_obj(struct nft_object_type *obj_type);
@@ -1272,7 +1285,7 @@ struct nft_trans_set {
struct nft_trans_chain {
bool update;
- char name[NFT_CHAIN_MAXNAMELEN];
+ char *name;
struct nft_stats __percpu *stats;
u8 policy;
};
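
Under the split introduced here, an object's behaviour moves into nft_object_ops while nft_object_type keeps only the registration and netlink glue. A minimal sketch, with every my_*/MY_* name hypothetical and the numeric type value made up:

#include <linux/module.h>
#include <net/netfilter/nf_tables.h>

#define MY_NFT_OBJECT_TYPE	100	/* hypothetical; real values live in uapi */

static void my_obj_eval(struct nft_object *obj, struct nft_regs *regs,
			const struct nft_pktinfo *pkt)
{
	/* update the object's private state for this packet */
}

static struct nft_object_type my_obj_type;

static const struct nft_object_ops my_obj_ops = {
	.type		= &my_obj_type,
	.size		= sizeof(u64),		/* private data size */
	.eval		= my_obj_eval,
};

static struct nft_object_type my_obj_type __read_mostly = {
	.type		= MY_NFT_OBJECT_TYPE,
	.ops		= &my_obj_ops,		/* fixed ops, no select_ops */
	.owner		= THIS_MODULE,
};

/* registration still happens on the type: nft_register_obj(&my_obj_type); */
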
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 8f690effec37..424684c33771 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -49,6 +49,8 @@ struct nft_payload_set {
};
extern const struct nft_expr_ops nft_payload_fast_ops;
+
+extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 01709172b3d3..e51cf5f81597 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,8 +98,8 @@
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
- * nla_put_u64_64bits(skb, type,
- * value, padattr) add u64 attribute to skb
+ * nla_put_u64_64bit(skb, type,
+ * value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
@@ -178,6 +178,7 @@ enum {
NLA_S16,
NLA_S32,
NLA_S64,
+ NLA_BITFIELD32,
__NLA_TYPE_MAX,
};
@@ -206,6 +207,7 @@ enum {
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
+ * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute
* All other Minimum length of attribute payload
*
* Example:
@@ -213,11 +215,13 @@ enum {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
* [ATTR_BAZ] = { .len = sizeof(struct mystruct) },
+ * [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags },
* };
*/
struct nla_policy {
u16 type;
u16 len;
+ void *validation_data;
};
/**
@@ -247,6 +251,7 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+char *nla_strdup(const struct nlattr *nla, gfp_t flags);
int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
@@ -1203,6 +1208,18 @@ static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla)
}
/**
+ * nla_get_bitfield32 - return payload of a 32-bit bitfield attribute
+ * @nla: nla_bitfield32 attribute
+ */
+static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla)
+{
+ struct nla_bitfield32 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+ return tmp;
+}
+
+/**
* nla_memdup - duplicate attribute memory (kmemdup)
* @src: netlink attribute to duplicate from
* @gfp: GFP mask
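
A sketch of consuming the new attribute type; the attribute name, the flag bits and the exact type behind validation_data are assumptions modelled on the ATTR_GOO policy example above:

#include <net/netlink.h>

#define MYF_FOO		(1U << 0)	/* hypothetical flag bits */
#define MYF_BAR		(1U << 1)

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_FLAGS,			/* NLA_BITFIELD32 */
	__MY_ATTR_MAX,
};

/* Assumed: validation_data points at the mask of acceptable flag bits. */
static const u32 my_valid_flags = MYF_FOO | MYF_BAR;

static const struct nla_policy my_policy[__MY_ATTR_MAX] = {
	[MY_ATTR_FLAGS] = { .type = NLA_BITFIELD32,
			    .validation_data = (void *)&my_valid_flags },
};

static u32 my_parse_flags(struct nlattr *attr)
{
	struct nla_bitfield32 bf = nla_get_bitfield32(attr);

	/* selector says which bits the sender meant to set, value holds them */
	return bf.value & bf.selector;
}
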
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 9a14a0850b0e..20d061c805e3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -159,6 +159,7 @@ struct netns_ipv4 {
int sysctl_fib_multipath_hash_policy;
#endif
+ struct fib_notifier_ops *notifier_ops;
unsigned int fib_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index de7745e2edcc..2544f9760a42 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -36,6 +36,7 @@ struct netns_sysctl_ipv6 {
int idgen_retries;
int idgen_delay;
int flowlabel_state_ranges;
+ int flowlabel_reflect;
};
struct netns_ipv6 {
@@ -65,6 +66,7 @@ struct netns_ipv6 {
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ bool fib6_has_custom_rules;
struct rt6_info *ip6_prohibit_entry;
struct rt6_info *ip6_blk_hole_entry;
struct fib6_table *fib6_local_tbl;
@@ -86,6 +88,7 @@ struct netns_ipv6 {
atomic_t dev_addr_genid;
atomic_t fib6_sernum;
struct seg6_pernet_data *seg6_data;
+ struct fib_notifier_ops *notifier_ops;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cea396b53a60..72d66c8763d0 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,7 +16,7 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
bool defrag_ipv4;
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 27bb9633c69d..611521646dd4 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -6,7 +6,6 @@
#include <linux/workqueue.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
-#include <net/flowcache.h>
struct ctl_table_header;
@@ -73,16 +72,6 @@ struct netns_xfrm {
spinlock_t xfrm_state_lock;
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
-
- /* flow cache part */
- struct flow_cache flow_cache_global;
- atomic_t flow_cache_genid;
- struct list_head flow_cache_gc_list;
- atomic_t flow_cache_gc_count;
- spinlock_t flow_cache_gc_lock;
- struct work_struct flow_cache_gc_work;
- struct work_struct flow_cache_flush_work;
- struct mutex flow_flush_sem;
};
#endif
diff --git a/include/net/nsh.h b/include/net/nsh.h
new file mode 100644
index 000000000000..a1eaea20be96
--- /dev/null
+++ b/include/net/nsh.h
@@ -0,0 +1,307 @@
+#ifndef __NET_NSH_H
+#define __NET_NSH_H 1
+
+#include <linux/skbuff.h>
+
+/*
+ * Network Service Header:
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Ver|O|U| TTL | Length |U|U|U|U|MD Type| Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Service Path Identifier (SPI) | Service Index |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * ~ Mandatory/Optional Context Headers ~
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Version: The version field is used to ensure backward compatibility
+ * going forward with future NSH specification updates. It MUST be set
+ * to 0x0 by the sender, in this first revision of NSH. Given the
+ * widespread implementation of existing hardware that uses the first
+ * nibble after an MPLS label stack for ECMP decision processing, this
+ * document reserves version 01b and this value MUST NOT be used in
+ * future versions of the protocol. Please see [RFC7325] for further
+ * discussion of MPLS-related forwarding requirements.
+ *
+ * O bit: Setting this bit indicates an Operations, Administration, and
+ * Maintenance (OAM) packet. The actual format and processing of SFC
+ * OAM packets is outside the scope of this specification (see for
+ * example [I-D.ietf-sfc-oam-framework] for one approach).
+ *
+ * The O bit MUST be set for OAM packets and MUST NOT be set for non-OAM
+ * packets. The O bit MUST NOT be modified along the SFP.
+ *
+ * SF/SFF/SFC Proxy/Classifier implementations that do not support SFC
+ * OAM procedures SHOULD discard packets with O bit set, but MAY support
+ * a configurable parameter to enable forwarding received SFC OAM
+ * packets unmodified to the next element in the chain. Forwarding OAM
+ * packets unmodified by SFC elements that do not support SFC OAM
+ * procedures may be acceptable for a subset of OAM functions, but can
+ * result in unexpected outcomes for others, thus it is recommended to
+ * analyze the impact of forwarding an OAM packet for all OAM functions
+ * prior to enabling this behavior. The configurable parameter MUST be
+ * disabled by default.
+ *
+ * TTL: Indicates the maximum SFF hops for an SFP. This field is used
+ * for service plane loop detection. The initial TTL value SHOULD be
+ * configurable via the control plane; the configured initial value can
+ * be specific to one or more SFPs. If no initial value is explicitly
+ * provided, the default initial TTL value of 63 MUST be used. Each SFF
+ * involved in forwarding an NSH packet MUST decrement the TTL value by
+ * 1 prior to NSH forwarding lookup. Decrementing by 1 from an incoming
+ * value of 0 shall result in a TTL value of 63. The packet MUST NOT be
+ * forwarded if TTL is, after decrement, 0.
+ *
+ * All other flag fields, marked U, are unassigned and available for
+ * future use, see Section 11.2.1. Unassigned bits MUST be set to zero
+ * upon origination, and MUST be ignored and preserved unmodified by
+ * other NSH supporting elements. Elements which do not understand the
+ * meaning of any of these bits MUST NOT modify their actions based on
+ * those unknown bits.
+ *
+ * Length: The total length, in 4-byte words, of NSH including the Base
+ * Header, the Service Path Header, the Fixed Length Context Header or
+ * Variable Length Context Header(s). The length MUST be 0x6 for MD
+ * Type equal to 0x1, and MUST be 0x2 or greater for MD Type equal to
+ * 0x2. The length of the NSH header MUST be an integer multiple of 4
+ * bytes, thus variable length metadata is always padded out to a
+ * multiple of 4 bytes.
+ *
+ * MD Type: Indicates the format of NSH beyond the mandatory Base Header
+ * and the Service Path Header. MD Type defines the format of the
+ * metadata being carried.
+ *
+ * 0x0 - This is a reserved value. Implementations SHOULD silently
+ * discard packets with MD Type 0x0.
+ *
+ * 0x1 - This indicates that the format of the header includes a fixed
+ * length Context Header (see Figure 4 below).
+ *
+ * 0x2 - This does not mandate any headers beyond the Base Header and
+ * Service Path Header, but may contain optional variable length Context
+ * Header(s). The semantics of the variable length Context Header(s)
+ * are not defined in this document. The format of the optional
+ * variable length Context Headers is provided in Section 2.5.1.
+ *
+ * 0xF - This value is reserved for experimentation and testing, as per
+ * [RFC3692]. Implementations not explicitly configured to be part of
+ * an experiment SHOULD silently discard packets with MD Type 0xF.
+ *
+ * Next Protocol: indicates the protocol type of the encapsulated data.
+ * NSH does not alter the inner payload, and the semantics on the inner
+ * protocol remain unchanged due to NSH service function chaining.
+ * Please see the IANA Considerations section below, Section 11.2.5.
+ *
+ * This document defines the following Next Protocol values:
+ *
+ * 0x1: IPv4
+ * 0x2: IPv6
+ * 0x3: Ethernet
+ * 0x4: NSH
+ * 0x5: MPLS
+ * 0xFE: Experiment 1
+ * 0xFF: Experiment 2
+ *
+ * Packets with Next Protocol values not supported SHOULD be silently
+ * dropped by default, although an implementation MAY provide a
+ * configuration parameter to forward them. Additionally, an
+ * implementation not explicitly configured for a specific experiment
+ * [RFC3692] SHOULD silently drop packets with Next Protocol values 0xFE
+ * and 0xFF.
+ *
+ * Service Path Identifier (SPI): Identifies a service path.
+ * Participating nodes MUST use this identifier for Service Function
+ * Path selection. The initial classifier MUST set the appropriate SPI
+ * for a given classification result.
+ *
+ * Service Index (SI): Provides location within the SFP. The initial
+ * classifier for a given SFP SHOULD set the SI to 255, however the
+ * control plane MAY configure the initial value of SI as appropriate
+ * (i.e., taking into account the length of the service function path).
+ * The Service Index MUST be decremented by a value of 1 by Service
+ * Functions or by SFC Proxy nodes after performing required services
+ * and the new decremented SI value MUST be used in the egress packet's
+ * NSH. The initial Classifier MUST send the packet to the first SFF in
+ * the identified SFP for forwarding along an SFP. If re-classification
+ * occurs, and that re-classification results in a new SPI, the
+ * (re)classifier is, in effect, the initial classifier for the
+ * resultant SPI.
+ *
+ * The SI is used in conjunction with the Service Path Identifier for
+ * Service Function Path Selection and for determining the next SFF/SF
+ * in the path. The SI is also valuable when troubleshooting or
+ * reporting service paths. Additionally, while the TTL field is the
+ * main mechanism for service plane loop detection, the SI can also be
+ * used for detecting service plane loops.
+ *
+ * When the Base Header specifies MD Type = 0x1, a Fixed Length Context
+ * Header (16-bytes) MUST be present immediately following the Service
+ * Path Header. The value of a Fixed Length Context
+ * Header that carries no metadata MUST be set to zero.
+ *
+ * When the base header specifies MD Type = 0x2, zero or more Variable
+ * Length Context Headers MAY be added, immediately following the
+ * Service Path Header (see Figure 5). Therefore, Length = 0x2,
+ * indicates that only the Base Header followed by the Service Path
+ * Header are present. The optional Variable Length Context Headers
+ * MUST be of an integer number of 4-bytes. The base header Length
+ * field MUST be used to determine the offset to locate the original
+ * packet or frame for SFC nodes that require access to that
+ * information.
+ *
+ * The format of the optional variable length Context Headers
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Metadata Class | Type |U| Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Variable Metadata |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Metadata Class (MD Class): Defines the scope of the 'Type' field to
+ * provide a hierarchical namespace. The IANA Considerations
+ * Section 11.2.4 defines how the MD Class values can be allocated to
+ * standards bodies, vendors, and others.
+ *
+ * Type: Indicates the explicit type of metadata being carried. The
+ * definition of the Type is the responsibility of the MD Class owner.
+ *
+ * Unassigned bit: One unassigned bit is available for future use. This
+ * bit MUST NOT be set, and MUST be ignored on receipt.
+ *
+ * Length: Indicates the length of the variable metadata, in bytes. In
+ * case the metadata length is not an integer number of 4-byte words,
+ * the sender MUST add pad bytes immediately following the last metadata
+ * byte to extend the metadata to an integer number of 4-byte words.
+ * The receiver MUST round up the length field to the nearest 4-byte
+ * word boundary, to locate and process the next field in the packet.
+ * The receiver MUST access only those bytes in the metadata indicated
+ * by the length field (i.e., actual number of bytes) and MUST ignore
+ * the remaining bytes up to the nearest 4-byte word boundary. The
+ * Length may be 0 or greater.
+ *
+ * A value of 0 denotes a Context Header without a Variable Metadata
+ * field.
+ *
+ * [0] https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/
+ */
+
+/**
+ * struct nsh_md1_ctx - Keeps track of NSH context data
+ * @context: NSH contexts (nshc1 through nshc4).
+ */
+struct nsh_md1_ctx {
+ __be32 context[4];
+};
+
+struct nsh_md2_tlv {
+ __be16 md_class;
+ u8 type;
+ u8 length;
+ u8 md_value[];
+};
+
+struct nshhdr {
+ __be16 ver_flags_ttl_len;
+ u8 mdtype;
+ u8 np;
+ __be32 path_hdr;
+ union {
+ struct nsh_md1_ctx md1;
+ struct nsh_md2_tlv md2;
+ };
+};
+
+/* Masking NSH header fields. */
+#define NSH_VER_MASK 0xc000
+#define NSH_VER_SHIFT 14
+#define NSH_FLAGS_MASK 0x3000
+#define NSH_FLAGS_SHIFT 12
+#define NSH_TTL_MASK 0x0fc0
+#define NSH_TTL_SHIFT 6
+#define NSH_LEN_MASK 0x003f
+#define NSH_LEN_SHIFT 0
+
+#define NSH_MDTYPE_MASK 0x0f
+#define NSH_MDTYPE_SHIFT 0
+
+#define NSH_SPI_MASK 0xffffff00
+#define NSH_SPI_SHIFT 8
+#define NSH_SI_MASK 0x000000ff
+#define NSH_SI_SHIFT 0
+
+/* MD Type Registry. */
+#define NSH_M_TYPE1 0x01
+#define NSH_M_TYPE2 0x02
+#define NSH_M_EXP1 0xFE
+#define NSH_M_EXP2 0xFF
+
+/* NSH Base Header Length */
+#define NSH_BASE_HDR_LEN 8
+
+/* NSH MD Type 1 header Length. */
+#define NSH_M_TYPE1_LEN 24
+
+/* NSH header maximum Length. */
+#define NSH_HDR_MAX_LEN 256
+
+/* NSH context headers maximum Length. */
+#define NSH_CTX_HDRS_MAX_LEN 248
+
+static inline struct nshhdr *nsh_hdr(struct sk_buff *skb)
+{
+ return (struct nshhdr *)skb_network_header(skb);
+}
+
+static inline u16 nsh_hdr_len(const struct nshhdr *nsh)
+{
+ return ((ntohs(nsh->ver_flags_ttl_len) & NSH_LEN_MASK)
+ >> NSH_LEN_SHIFT) << 2;
+}
+
+static inline u8 nsh_get_ver(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_VER_MASK)
+ >> NSH_VER_SHIFT;
+}
+
+static inline u8 nsh_get_flags(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_FLAGS_MASK)
+ >> NSH_FLAGS_SHIFT;
+}
+
+static inline u8 nsh_get_ttl(const struct nshhdr *nsh)
+{
+ return (ntohs(nsh->ver_flags_ttl_len) & NSH_TTL_MASK)
+ >> NSH_TTL_SHIFT;
+}
+
+static inline void __nsh_set_xflag(struct nshhdr *nsh, u16 xflag, u16 xmask)
+{
+ nsh->ver_flags_ttl_len
+ = (nsh->ver_flags_ttl_len & ~htons(xmask)) | htons(xflag);
+}
+
+static inline void nsh_set_flags_and_ttl(struct nshhdr *nsh, u8 flags, u8 ttl)
+{
+ __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) |
+ ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK),
+ NSH_FLAGS_MASK | NSH_TTL_MASK);
+}
+
+static inline void nsh_set_flags_ttl_len(struct nshhdr *nsh, u8 flags,
+ u8 ttl, u8 len)
+{
+ len = len >> 2;
+ __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) |
+ ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK) |
+ ((len << NSH_LEN_SHIFT) & NSH_LEN_MASK),
+ NSH_FLAGS_MASK | NSH_TTL_MASK | NSH_LEN_MASK);
+}
+
+#endif /* __NET_NSH_H */
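
A short sketch pulling the service path fields out of a packet with the accessors defined above; it assumes the network header already points at a complete NSH header and does no length validation:

#include <linux/skbuff.h>
#include <net/nsh.h>

static void my_dump_nsh(struct sk_buff *skb)
{
	const struct nshhdr *nsh = nsh_hdr(skb);
	u32 path_hdr = ntohl(nsh->path_hdr);
	u32 spi = (path_hdr & NSH_SPI_MASK) >> NSH_SPI_SHIFT;
	u32 si = (path_hdr & NSH_SI_MASK) >> NSH_SI_SHIFT;

	pr_debug("NSH ver %u ttl %u hdrlen %u spi %u si %u\n",
		 nsh_get_ver(nsh), nsh_get_ttl(nsh), nsh_hdr_len(nsh),
		 spi, si);
}
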
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 537d0a0ad4c4..e80edd8879ef 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -11,7 +11,7 @@ struct tcf_walker {
int stop;
int skip;
int count;
- int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
+ int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
@@ -113,36 +113,6 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
return 0;
}
-/**
- * tcf_exts_is_predicative - check if a predicative extension is present
- * @exts: tc filter extensions handle
- *
- * Returns 1 if a predicative extension is present, i.e. an extension which
- * might cause further actions and thus overrule the regular tcf_result.
- */
-static inline int
-tcf_exts_is_predicative(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return exts->nr_actions;
-#else
- return 0;
-#endif
-}
-
-/**
- * tcf_exts_is_available - check if at least one extension is present
- * @exts: tc filter extensions handle
- *
- * Returns 1 if at least one extension is present.
- */
-static inline int
-tcf_exts_is_available(struct tcf_exts *exts)
-{
- /* All non-predicative extensions must be added here. */
- return tcf_exts_is_predicative(exts);
-}
-
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
struct list_head *actions)
{
@@ -177,46 +147,61 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
}
/**
+ * tcf_exts_has_actions - check if at least one action is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns true if at least one action is present.
+ */
+static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->nr_actions;
+#else
+ return false;
+#endif
+}
+
+/**
+ * tcf_exts_has_one_action - check if exactly one action is present
+ * @exts: tc filter extensions handle
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->nr_actions == 1;
+#else
+ return false;
+#endif
+}
+
+/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
* @exts: tc filter extensions handle
* @res: desired result
*
- * Executes all configured extensions. Returns 0 on a normal execution,
+ * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
* a negative number if the filter must be considered unmatched or
* a positive action code (TC_ACT_*) which must be returned to the
* underlying layer.
*/
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
- struct tcf_result *res)
+ struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
- if (exts->nr_actions)
- return tcf_action_exec(skb, exts->actions, exts->nr_actions,
- res);
+ return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
- return 0;
+ return TC_ACT_OK;
}
-#ifdef CONFIG_NET_CLS_ACT
-
-#define tc_no_actions(_exts) ((_exts)->nr_actions == 0)
-#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
-
-#else /* CONFIG_NET_CLS_ACT */
-
-#define tc_no_actions(_exts) true
-#define tc_single_action(_exts) false
-
-#endif /* CONFIG_NET_CLS_ACT */
-
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
struct nlattr **tb, struct nlattr *rate_tlv,
struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
-void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
- struct tcf_exts *src);
+void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
@@ -333,26 +318,6 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
/**
- * tcf_em_tree_change - replace ematch tree of a running classifier
- *
- * @tp: classifier kind handle
- * @dst: destination ematch tree variable
- * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
- *
- * This functions replaces the ematch tree in @dst with the ematch
- * tree in @src. The classifier in charge of the ematch tree may be
- * running.
- */
-static inline void tcf_em_tree_change(struct tcf_proto *tp,
- struct tcf_ematch_tree *dst,
- struct tcf_ematch_tree *src)
-{
- tcf_tree_lock(tp);
- memcpy(dst, src, sizeof(*dst));
- tcf_tree_unlock(tp);
-}
-
-/**
* tcf_em_tree_match - evaulate an ematch tree
*
* @skb: socket buffer of the packet in question
@@ -386,7 +351,6 @@ struct tcf_ematch_tree {
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
-#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
#endif /* CONFIG_NET_EMATCH */
@@ -441,6 +405,23 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
+struct tc_cls_common_offload {
+ u32 chain_index;
+ __be16 protocol;
+ u32 prio;
+ u32 classid;
+};
+
+static inline void
+tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
+ const struct tcf_proto *tp)
+{
+ cls_common->chain_index = tp->chain->index;
+ cls_common->protocol = tp->protocol;
+ cls_common->prio = tp->prio;
+ cls_common->classid = tp->classid;
+}
+
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tc_u32_sel *sel;
@@ -467,6 +448,7 @@ enum tc_clsu32_command {
};
struct tc_cls_u32_offload {
+ struct tc_cls_common_offload common;
/* knode values */
enum tc_clsu32_command command;
union {
@@ -475,19 +457,12 @@ struct tc_cls_u32_offload {
};
};
-static inline bool tc_can_offload(const struct net_device *dev,
- const struct tcf_proto *tp)
+static inline bool tc_can_offload(const struct net_device *dev)
{
- const struct Qdisc *sch = tp->q;
- const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
-
if (!(dev->features & NETIF_F_HW_TC))
return false;
if (!dev->netdev_ops->ndo_setup_tc)
return false;
- if (cops && cops->tcf_cl_offload)
- return cops->tcf_cl_offload(tp->classid);
-
return true;
}
@@ -496,12 +471,11 @@ static inline bool tc_skip_hw(u32 flags)
return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}
-static inline bool tc_should_offload(const struct net_device *dev,
- const struct tcf_proto *tp, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
{
if (tc_skip_hw(flags))
return false;
- return tc_can_offload(dev, tp);
+ return tc_can_offload(dev);
}
static inline bool tc_skip_sw(u32 flags)
@@ -533,13 +507,14 @@ enum tc_fl_command {
};
struct tc_cls_flower_offload {
+ struct tc_cls_common_offload common;
enum tc_fl_command command;
- u32 prio;
unsigned long cookie;
struct flow_dissector *dissector;
struct fl_flow_key *mask;
struct fl_flow_key *key;
struct tcf_exts *exts;
+ bool egress_dev;
};
enum tc_matchall_command {
@@ -548,6 +523,7 @@ enum tc_matchall_command {
};
struct tc_cls_matchall_offload {
+ struct tc_cls_common_offload common;
enum tc_matchall_command command;
struct tcf_exts *exts;
unsigned long cookie;
@@ -561,6 +537,7 @@ enum tc_clsbpf_command {
};
struct tc_cls_bpf_offload {
+ struct tc_cls_common_offload common;
enum tc_clsbpf_command command;
struct tcf_exts *exts;
struct bpf_prog *prog;
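
A sketch of how a classifier would use the reworked helpers when deciding whether to push a filter to hardware; my_try_hw_offload and the surrounding assumptions (dev, tp, exts and flags already validated) are illustrative:

#include <net/pkt_cls.h>

static bool my_try_hw_offload(struct net_device *dev, struct tcf_proto *tp,
			      struct tcf_exts *exts, u32 flags)
{
	struct tc_cls_common_offload common;

	if (!tcf_exts_has_actions(exts))
		return false;			/* nothing worth offloading */
	if (!tc_should_offload(dev, flags))
		return false;			/* no NETIF_F_HW_TC or skip_hw */

	tc_cls_common_offload_init(&common, tp);
	/* ...copy "common" into the per-classifier offload struct and
	 * invoke dev->netdev_ops->ndo_setup_tc()... */
	return true;
}
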
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2579c209ea51..259bc191ba59 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -5,6 +5,7 @@
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
+#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
@@ -132,4 +133,17 @@ static inline unsigned int psched_mtu(const struct net_device *dev)
return dev->mtu + dev->hard_header_len;
}
+static inline bool is_classid_clsact_ingress(u32 classid)
+{
+ /* This also returns true for ingress qdisc */
+ return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+ TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
+static inline bool is_classid_clsact_egress(u32 classid)
+{
+ return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+ TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
#endif
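
A trivial sketch of the new classid helpers; my_clsact_dir is hypothetical:

#include <net/pkt_sched.h>

static const char *my_clsact_dir(u32 classid)
{
	if (is_classid_clsact_ingress(classid))
		return "ingress";
	if (is_classid_clsact_egress(classid))
		return "egress";
	return "none";
}
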
diff --git a/include/net/raw.h b/include/net/raw.h
index 57c33dd22ec4..99d26d0c4a19 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -26,7 +26,7 @@ extern struct proto raw_prot;
extern struct raw_hashinfo raw_v4_hashinfo;
struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
unsigned short num, __be32 raddr,
- __be32 laddr, int dif);
+ __be32 laddr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
void raw_icmp_error(struct sk_buff *, int, u32);
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index cbe4e9de1894..4addc5c988e0 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -6,7 +6,7 @@
extern struct raw_hashinfo raw_v6_hashinfo;
struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
- const struct in6_addr *rmt_addr, int dif);
+ const struct in6_addr *rmt_addr, int dif, int sdif);
int raw_abort(struct sock *sk, int err);
diff --git a/include/net/route.h b/include/net/route.h
index cb0a76d9dde1..1b09a9368c68 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -189,10 +189,11 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
rcu_read_lock();
err = ip_route_input_noref(skb, dst, src, tos, devin);
- if (!err)
+ if (!err) {
skb_dst_force_safe(skb);
- if (!skb_dst(skb))
- err = -EINVAL;
+ if (!skb_dst(skb))
+ err = -EINVAL;
+ }
rcu_read_unlock();
return err;
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index abe6b733d473..21837ca68ecc 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -7,12 +7,15 @@
typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
struct netlink_ext_ack *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
-typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
+
+enum rtnl_link_flags {
+ RTNL_FLAG_DOIT_UNLOCKED = 1,
+};
int __rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+ rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
int rtnl_unregister(int protocol, int msgtype);
void rtnl_unregister_all(int protocol);
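
With the calcit hook gone, registration now takes a flags word instead; a sketch where both the handler and the choice of message type are purely illustrative:

#include <net/rtnetlink.h>

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	/* read-only handler that does not need the rtnl lock */
	return 0;
}

/* during subsystem init (message type chosen for illustration only): */
static void my_rtnl_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_GETNSID, my_doit, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
}
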
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 1c123e2b2415..135f5a2dd931 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -75,7 +75,6 @@ struct Qdisc {
struct hlist_node hash;
u32 handle;
u32 parent;
- void *u32_node;
struct netdev_queue *dev_queue;
@@ -101,6 +100,13 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
};
+static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return;
+ refcount_inc(&qdisc->refcnt);
+}
+
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
@@ -147,8 +153,7 @@ struct Qdisc_class_ops {
void (*qlen_notify)(struct Qdisc *, unsigned long);
/* Class manipulation routines */
- unsigned long (*get)(struct Qdisc *, u32 classid);
- void (*put)(struct Qdisc *, unsigned long);
+ unsigned long (*find)(struct Qdisc *, u32 classid);
int (*change)(struct Qdisc *, u32, u32,
struct nlattr **, unsigned long *);
int (*delete)(struct Qdisc *, unsigned long);
@@ -156,7 +161,6 @@ struct Qdisc_class_ops {
/* Filter manipulation */
struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long);
- bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -213,16 +217,17 @@ struct tcf_proto_ops {
int (*init)(struct tcf_proto*);
void (*destroy)(struct tcf_proto*);
- unsigned long (*get)(struct tcf_proto*, u32 handle);
+ void* (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- unsigned long *, bool);
- int (*delete)(struct tcf_proto*, unsigned long, bool*);
+ void **, bool);
+ int (*delete)(struct tcf_proto*, void *, bool*);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ void (*bind_class)(void *, u32, unsigned long);
/* rtnetlink specific */
- int (*dump)(struct net*, struct tcf_proto*, unsigned long,
+ int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*);
struct module *owner;
@@ -394,6 +399,9 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
struct Qdisc_class_common *cl;
unsigned int h;
+ if (!id)
+ return NULL;
+
h = qdisc_class_hash(id, hash->hashmask);
hlist_for_each_entry(cl, &hash->hash[h], hnode) {
if (cl->classid == id)
@@ -806,8 +814,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold;
*pold = new;
if (old != NULL) {
- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ unsigned int qlen = old->q.qlen;
+ unsigned int backlog = old->qstats.backlog;
+
qdisc_reset(old);
+ qdisc_tree_reduce_backlog(old, qlen, backlog);
}
sch_tree_unlock(sch);
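
With get()/put() collapsed into find(), a class lookup no longer takes a reference; a sketch of a ->find() implementation, where struct my_sched_data and its clhash member are assumed qdisc-private state:

#include <net/sch_generic.h>

struct my_sched_data {
	struct Qdisc_class_hash clhash;
	/* ...other scheduler state... */
};

static unsigned long my_qdisc_find(struct Qdisc *sch, u32 classid)
{
	struct my_sched_data *q = qdisc_priv(sch);

	/* qdisc_class_find() now returns NULL for classid 0 as well */
	return (unsigned long)qdisc_class_find(&q->clhash, classid);
}
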
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index d4679e7a5ed5..b55c6a48a206 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -40,7 +40,7 @@
#include <net/sctp/structs.h>
-typedef enum {
+enum sctp_verb {
SCTP_CMD_NOP = 0, /* Do nothing. */
SCTP_CMD_NEW_ASOC, /* Register a new association. */
SCTP_CMD_DELETE_TCB, /* Delete the current association. */
@@ -108,16 +108,16 @@ typedef enum {
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
-} sctp_verb_t;
+};
-/* How many commands can you put in an sctp_cmd_seq_t?
+/* How many commands can you put in a struct sctp_cmd_seq?
* This is a rather arbitrary number, ideally derived from a careful
* analysis of the state functions, but in reality just taken from
* thin air in the hopes othat we don't trigger a kernel panic.
*/
#define SCTP_MAX_NUM_COMMANDS 20
-typedef union {
+union sctp_arg {
void *zero_all; /* Set to NULL to clear the entire union */
__s32 i32;
__u32 u32;
@@ -126,8 +126,8 @@ typedef union {
__u8 u8;
int error;
__be16 err;
- sctp_state_t state;
- sctp_event_timeout_t to;
+ enum sctp_state state;
+ enum sctp_event_timeout to;
struct sctp_chunk *chunk;
struct sctp_association *asoc;
struct sctp_transport *transport;
@@ -135,26 +135,26 @@ typedef union {
struct sctp_init_chunk *init;
struct sctp_ulpevent *ulpevent;
struct sctp_packet *packet;
- sctp_sackhdr_t *sackh;
+ struct sctp_sackhdr *sackh;
struct sctp_datamsg *msg;
-} sctp_arg_t;
+};
/* We are simulating ML type constructors here.
*
* SCTP_ARG_CONSTRUCTOR(NAME, TYPE, ELT) builds a function called
* SCTP_NAME() which takes an argument of type TYPE and returns an
- * sctp_arg_t. It does this by inserting the sole argument into the
- * ELT union element of a local sctp_arg_t.
+ * union sctp_arg. It does this by inserting the sole argument into
+ * the ELT union element of a local union sctp_arg.
*
* E.g., SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) builds SCTP_I32(arg),
- * which takes an __s32 and returns a sctp_arg_t containing the
+ * which takes an __s32 and returns a union sctp_arg containing the
* __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg.
*/
#define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
-static inline sctp_arg_t \
+static inline union sctp_arg \
SCTP_## name (type arg) \
-{ sctp_arg_t retval;\
+{ union sctp_arg retval;\
retval.zero_all = NULL;\
retval.elt = arg;\
return retval;\
@@ -167,8 +167,8 @@ SCTP_ARG_CONSTRUCTOR(U16, __u16, u16)
SCTP_ARG_CONSTRUCTOR(U8, __u8, u8)
SCTP_ARG_CONSTRUCTOR(ERROR, int, error)
SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */
-SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state)
-SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to)
+SCTP_ARG_CONSTRUCTOR(STATE, enum sctp_state, state)
+SCTP_ARG_CONSTRUCTOR(TO, enum sctp_event_timeout, to)
SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
@@ -176,42 +176,42 @@ SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp)
SCTP_ARG_CONSTRUCTOR(PEER_INIT, struct sctp_init_chunk *, init)
SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
-SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh)
+SCTP_ARG_CONSTRUCTOR(SACKH, struct sctp_sackhdr *, sackh)
SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg)
-static inline sctp_arg_t SCTP_FORCE(void)
+static inline union sctp_arg SCTP_FORCE(void)
{
return SCTP_I32(1);
}
-static inline sctp_arg_t SCTP_NOFORCE(void)
+static inline union sctp_arg SCTP_NOFORCE(void)
{
return SCTP_I32(0);
}
-static inline sctp_arg_t SCTP_NULL(void)
+static inline union sctp_arg SCTP_NULL(void)
{
- sctp_arg_t retval;
+ union sctp_arg retval;
retval.zero_all = NULL;
return retval;
}
-typedef struct {
- sctp_arg_t obj;
- sctp_verb_t verb;
-} sctp_cmd_t;
+struct sctp_cmd {
+ union sctp_arg obj;
+ enum sctp_verb verb;
+};
-typedef struct {
- sctp_cmd_t cmds[SCTP_MAX_NUM_COMMANDS];
- sctp_cmd_t *last_used_slot;
- sctp_cmd_t *next_cmd;
-} sctp_cmd_seq_t;
+struct sctp_cmd_seq {
+ struct sctp_cmd cmds[SCTP_MAX_NUM_COMMANDS];
+ struct sctp_cmd *last_used_slot;
+ struct sctp_cmd *next_cmd;
+};
/* Initialize a block of memory as a command sequence.
* Return 0 if the initialization fails.
*/
-static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
+static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq)
{
/* cmds[] is filled backwards to simplify the overflow BUG() check */
seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS;
@@ -220,15 +220,15 @@ static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
}
-/* Add a command to an sctp_cmd_seq_t.
+/* Add a command to a struct sctp_cmd_seq.
*
* Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
* to wrap data which goes in the obj argument.
*/
-static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb,
- sctp_arg_t obj)
+static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq,
+ enum sctp_verb verb, union sctp_arg obj)
{
- sctp_cmd_t *cmd = seq->last_used_slot - 1;
+ struct sctp_cmd *cmd = seq->last_used_slot - 1;
BUG_ON(cmd < seq->cmds);
@@ -240,7 +240,7 @@ static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb,
/* Return the next command structure in an sctp_cmd_seq.
* Return NULL at the end of the sequence.
*/
-static inline sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
+static inline struct sctp_cmd *sctp_next_cmd(struct sctp_cmd_seq *seq)
{
if (seq->next_cmd <= seq->last_used_slot)
return NULL;
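
The constructor comment above is easiest to see in a self-contained sketch. The userspace program below mirrors the pattern command.h now expresses with union sctp_arg, struct sctp_cmd and sctp_add_cmd_sf(): constructor macros that wrap a value in a tagged union, plus a fixed-size command array filled back to front with an overflow check. Every name here (demo_*, DEMO_*) is made up for illustration; this is not kernel code.

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types; all names are illustrative. */
    union demo_arg {
            void *zero_all;         /* used only to clear the union */
            int i32;
            unsigned short u16;
    };

    #define DEMO_ARG_CONSTRUCTOR(name, type, elt)           \
    static inline union demo_arg DEMO_##name(type arg)      \
    {                                                        \
            union demo_arg retval;                           \
            retval.zero_all = NULL;                          \
            retval.elt = arg;                                \
            return retval;                                   \
    }

    DEMO_ARG_CONSTRUCTOR(I32, int, i32)
    DEMO_ARG_CONSTRUCTOR(U16, unsigned short, u16)

    #define DEMO_MAX_CMDS 4

    struct demo_cmd {
            int verb;
            union demo_arg obj;
    };

    struct demo_cmd_seq {
            struct demo_cmd cmds[DEMO_MAX_CMDS];
            struct demo_cmd *last_used_slot;    /* filled back to front */
    };

    static void demo_add_cmd(struct demo_cmd_seq *seq, int verb, union demo_arg obj)
    {
            struct demo_cmd *cmd = seq->last_used_slot - 1;

            assert(cmd >= seq->cmds);           /* overflow check, like BUG_ON() */
            cmd->verb = verb;
            cmd->obj = obj;
            seq->last_used_slot = cmd;
    }

    int main(void)
    {
            struct demo_cmd_seq seq;

            seq.last_used_slot = seq.cmds + DEMO_MAX_CMDS;
            demo_add_cmd(&seq, 1, DEMO_I32(42));
            demo_add_cmd(&seq, 2, DEMO_U16(7));
            printf("first cmd carries %d\n", seq.cmds[DEMO_MAX_CMDS - 1].obj.i32);
            return 0;
    }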
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 9b18044c551e..deaafa9b09cb 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -42,7 +42,7 @@
#include <linux/sctp.h>
#include <linux/ipv6.h> /* For ipv6hdr. */
-#include <net/tcp_states.h> /* For TCP states used in sctp_sock_state_t */
+#include <net/tcp_states.h> /* For TCP states used in enum sctp_sock_state */
/* Value used for stream negotiation. */
enum { SCTP_MAX_STREAM = 0xffff };
@@ -71,20 +71,18 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
SCTP_NUM_AUTH_CHUNK_TYPES)
/* These are the different flavours of event. */
-typedef enum {
-
+enum sctp_event {
SCTP_EVENT_T_CHUNK = 1,
SCTP_EVENT_T_TIMEOUT,
SCTP_EVENT_T_OTHER,
SCTP_EVENT_T_PRIMITIVE
-
-} sctp_event_t;
+};
/* As a convenience for the state machine, we append SCTP_EVENT_* and
* SCTP_ULP_* to the list of possible chunks.
*/
-typedef enum {
+enum sctp_event_timeout {
SCTP_EVENT_TIMEOUT_NONE = 0,
SCTP_EVENT_TIMEOUT_T1_COOKIE,
SCTP_EVENT_TIMEOUT_T1_INIT,
@@ -96,21 +94,21 @@ typedef enum {
SCTP_EVENT_TIMEOUT_RECONF,
SCTP_EVENT_TIMEOUT_SACK,
SCTP_EVENT_TIMEOUT_AUTOCLOSE,
-} sctp_event_timeout_t;
+};
#define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE
#define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1)
-typedef enum {
+enum sctp_event_other {
SCTP_EVENT_NO_PENDING_TSN = 0,
SCTP_EVENT_ICMP_PROTO_UNREACH,
-} sctp_event_other_t;
+};
#define SCTP_EVENT_OTHER_MAX SCTP_EVENT_ICMP_PROTO_UNREACH
#define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1)
/* These are primitive requests from the ULP. */
-typedef enum {
+enum sctp_event_primitive {
SCTP_PRIMITIVE_ASSOCIATE = 0,
SCTP_PRIMITIVE_SHUTDOWN,
SCTP_PRIMITIVE_ABORT,
@@ -118,7 +116,7 @@ typedef enum {
SCTP_PRIMITIVE_REQUESTHEARTBEAT,
SCTP_PRIMITIVE_ASCONF,
SCTP_PRIMITIVE_RECONF,
-} sctp_event_primitive_t;
+};
#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF
#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
@@ -126,25 +124,25 @@ typedef enum {
/* We define here a utility type for manipulating subtypes.
* The subtype constructors all work like this:
*
- * sctp_subtype_t foo = SCTP_ST_CHUNK(SCTP_CID_INIT);
+ * union sctp_subtype foo = SCTP_ST_CHUNK(SCTP_CID_INIT);
*/
-typedef union {
+union sctp_subtype {
enum sctp_cid chunk;
- sctp_event_timeout_t timeout;
- sctp_event_other_t other;
- sctp_event_primitive_t primitive;
-} sctp_subtype_t;
+ enum sctp_event_timeout timeout;
+ enum sctp_event_other other;
+ enum sctp_event_primitive primitive;
+};
#define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
-static inline sctp_subtype_t \
+static inline union sctp_subtype \
SCTP_ST_## _name (_type _arg) \
-{ sctp_subtype_t _retval; _retval._elt = _arg; return _retval; }
+{ union sctp_subtype _retval; _retval._elt = _arg; return _retval; }
SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk)
-SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, sctp_event_timeout_t, timeout)
-SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other)
-SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
+SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, enum sctp_event_timeout, timeout)
+SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
+SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
@@ -155,8 +153,7 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
- sizeof(struct sctp_data_chunk)))
/* Internal error codes */
-typedef enum {
-
+enum sctp_ierror {
SCTP_IERROR_NO_ERROR = 0,
SCTP_IERROR_BASE = 1000,
SCTP_IERROR_NO_COOKIE,
@@ -177,12 +174,12 @@ typedef enum {
SCTP_IERROR_PROTO_VIOLATION,
SCTP_IERROR_ERROR,
SCTP_IERROR_ABORT,
-} sctp_ierror_t;
+};
/* SCTP state defines for internal state machine */
-typedef enum {
+enum sctp_state {
SCTP_STATE_CLOSED = 0,
SCTP_STATE_COOKIE_WAIT = 1,
@@ -193,7 +190,7 @@ typedef enum {
SCTP_STATE_SHUTDOWN_RECEIVED = 6,
SCTP_STATE_SHUTDOWN_ACK_SENT = 7,
-} sctp_state_t;
+};
#define SCTP_STATE_MAX SCTP_STATE_SHUTDOWN_ACK_SENT
#define SCTP_STATE_NUM_STATES (SCTP_STATE_MAX + 1)
@@ -214,19 +211,19 @@ typedef enum {
* - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
* association.
*/
-typedef enum {
+enum sctp_sock_state {
SCTP_SS_CLOSED = TCP_CLOSE,
SCTP_SS_LISTENING = TCP_LISTEN,
SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
SCTP_SS_ESTABLISHED = TCP_ESTABLISHED,
SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
-} sctp_sock_state_t;
+};
/* These functions map various types to printable names. */
-const char *sctp_cname(const sctp_subtype_t); /* chunk types */
-const char *sctp_oname(const sctp_subtype_t); /* other events */
-const char *sctp_tname(const sctp_subtype_t); /* timeouts */
-const char *sctp_pname(const sctp_subtype_t); /* primitives */
+const char *sctp_cname(const union sctp_subtype id); /* chunk types */
+const char *sctp_oname(const union sctp_subtype id); /* other events */
+const char *sctp_tname(const union sctp_subtype id); /* timeouts */
+const char *sctp_pname(const union sctp_subtype id); /* primitives */
/* This is a table of printable names of sctp_state_t's. */
extern const char *const sctp_state_tbl[];
@@ -312,19 +309,19 @@ enum { SCTP_MAX_GABS = 16 };
/* These return values describe the success or failure of a number of
* routines which form the lower interface to SCTP_outqueue.
*/
-typedef enum {
+enum sctp_xmit {
SCTP_XMIT_OK,
SCTP_XMIT_PMTU_FULL,
SCTP_XMIT_RWND_FULL,
SCTP_XMIT_DELAY,
-} sctp_xmit_t;
+};
/* These are the commands for manipulating transports. */
-typedef enum {
+enum sctp_transport_cmd {
SCTP_TRANSPORT_UP,
SCTP_TRANSPORT_DOWN,
SCTP_TRANSPORT_PF,
-} sctp_transport_cmd_t;
+};
/* These are the address scopes defined mainly for IPv4 addresses
* based on draft of SCTP IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
@@ -333,20 +330,22 @@ typedef enum {
* At this point, the IPv6 scopes will be mapped to these internal scopes
* as much as possible.
*/
-typedef enum {
+enum sctp_scope {
SCTP_SCOPE_GLOBAL, /* IPv4 global addresses */
SCTP_SCOPE_PRIVATE, /* IPv4 private addresses */
SCTP_SCOPE_LINK, /* IPv4 link local address */
SCTP_SCOPE_LOOPBACK, /* IPv4 loopback address */
SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */
-} sctp_scope_t;
+};
-typedef enum {
+enum {
SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */
SCTP_SCOPE_POLICY_ENABLE, /* Enable IPv4 address scoping */
SCTP_SCOPE_POLICY_PRIVATE, /* Follow draft but allow IPv4 private addresses */
SCTP_SCOPE_POLICY_LINK, /* Follow draft but allow IPv4 link local addresses */
-} sctp_scope_policy_t;
+};
+
+#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
* SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
@@ -370,20 +369,20 @@ typedef enum {
peer */
/* Reasons to retransmit. */
-typedef enum {
+enum sctp_retransmit_reason {
SCTP_RTXR_T3_RTX,
SCTP_RTXR_FAST_RTX,
SCTP_RTXR_PMTUD,
SCTP_RTXR_T1_RTX,
-} sctp_retransmit_reason_t;
+};
/* Reasons to lower cwnd. */
-typedef enum {
+enum sctp_lower_cwnd {
SCTP_LOWER_CWND_T3_RTX,
SCTP_LOWER_CWND_FAST_RTX,
SCTP_LOWER_CWND_ECNE,
SCTP_LOWER_CWND_INACTIVE,
-} sctp_lower_cwnd_t;
+};
/* SCTP-AUTH Necessary constants */
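
Since the SCTP_ST_* constructors above are one-liners, their use is best shown as a fragment. The sketch below forms a union sctp_subtype for a timeout event and hands it to sctp_sm_lookup_event(), whose new prototype appears in the sm.h hunk further down; net and asoc are assumed to exist in the caller, so this is a fragment rather than a complete function.

    /* Fragment only: "net" and "asoc" are assumed to exist in the caller. */
    union sctp_subtype subtype = SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T1_INIT);
    const struct sctp_sm_table_entry *entry;

    /* Look up the T1-INIT timeout handler for the association's current state. */
    entry = sctp_sm_lookup_event(net, SCTP_EVENT_T_TIMEOUT,
                                 asoc->state, subtype);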
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a9519a06a23b..06b4f515e157 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -94,8 +94,8 @@
/*
* sctp/protocol.c
*/
-int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
- sctp_scope_t, gfp_t gfp, int flags);
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr,
+ enum sctp_scope, gfp_t gfp, int flags);
struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
int sctp_register_pf(struct sctp_pf *, sa_family_t);
void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+ (void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -477,11 +479,13 @@ for (pos.v = chunk->member;\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
-for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \
sizeof(struct sctp_chunkhdr));\
+ ((void *)err + offsetof(struct sctp_errhdr, length) + sizeof(err->length) <=\
+ (void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
- ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
+ ntohs(err->length) >= sizeof(struct sctp_errhdr); \
+ err = (struct sctp_errhdr *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
@@ -546,7 +550,8 @@ static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
-static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
+static inline int __sctp_style(const struct sock *sk,
+ enum sctp_socket_type style)
{
return sctp_sk(sk)->type == style;
}
@@ -554,14 +559,15 @@ static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
/* Is the association in this state? */
#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
static inline int __sctp_state(const struct sctp_association *asoc,
- sctp_state_t state)
+ enum sctp_state state)
{
return asoc->state == state;
}
/* Is the socket in this state? */
#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
-static inline int __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
+static inline int __sctp_sstate(const struct sock *sk,
+ enum sctp_sock_state state)
{
return sk->sk_state == state;
}
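
The clause added to _sctp_walk_params() and _sctp_walk_errors() above guards the read of the length field itself: before ntohs(pos.p->length) is evaluated, the walker now verifies that the two length bytes lie inside the chunk. A standalone sketch of the same TLV walk, with a hypothetical header type and host-order lengths for brevity:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical TLV header; real SCTP parameters carry big-endian fields. */
    struct demo_tlv {
            uint16_t type;
            uint16_t length;    /* header + value, in bytes */
    };

    static void demo_walk(const uint8_t *buf, size_t buflen)
    {
            const uint8_t *pos = buf;
            const uint8_t *end = buf + buflen;

            while (pos < end) {
                    struct demo_tlv p;

                    /* New check: the length field itself must be inside the buffer. */
                    if ((size_t)(end - pos) <
                        offsetof(struct demo_tlv, length) + sizeof(p.length))
                            break;
                    memcpy(&p, pos, sizeof(p));
                    /* Existing checks: sane length and the value fits in the buffer. */
                    if (p.length < sizeof(p) || p.length > (size_t)(end - pos))
                            break;
                    printf("param type %u, %u bytes\n",
                           (unsigned)p.type, (unsigned)p.length);
                    pos += (p.length + 3u) & ~3u;   /* SCTP_PAD4()-style rounding */
            }
    }

    int main(void)
    {
            uint8_t buf[12] = { 0 };
            const struct demo_tlv first = { .type = 1, .length = 8 };

            memcpy(buf, &first, sizeof(first));
            demo_walk(buf, sizeof(buf));
            return 0;
    }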
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 860f378333b5..2db3d3a9ce1d 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -53,7 +53,7 @@
/*
* Possible values for the disposition are:
*/
-typedef enum {
+enum sctp_disposition {
SCTP_DISPOSITION_DISCARD, /* No further processing. */
SCTP_DISPOSITION_CONSUME, /* Process return values normally. */
SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */
@@ -63,24 +63,20 @@ typedef enum {
SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */
SCTP_DISPOSITION_ERROR, /* This is plain old user error. */
SCTP_DISPOSITION_BUG, /* This is a bug. */
-} sctp_disposition_t;
-
-typedef struct {
- int name;
- int action;
-} sctp_sm_command_t;
-
-typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
- const struct sctp_endpoint *,
- const struct sctp_association *,
- const sctp_subtype_t type,
- void *arg,
- sctp_cmd_seq_t *);
+};
+
+typedef enum sctp_disposition (sctp_state_fn_t) (
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
typedef void (sctp_timer_event_t) (unsigned long);
-typedef struct {
+struct sctp_sm_table_entry {
sctp_state_fn_t *fn;
const char *name;
-} sctp_sm_table_entry_t;
+};
/* A naming convention of "sctp_sf_xxx" applies to all the state functions
* currently in use.
@@ -175,10 +171,11 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
/* Prototypes for utility support functions. */
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
- sctp_event_t,
- sctp_state_t,
- sctp_subtype_t);
+const struct sctp_sm_table_entry *sctp_sm_lookup_event(
+ struct net *net,
+ enum sctp_event event_type,
+ enum sctp_state state,
+ union sctp_subtype event_subtype);
int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
@@ -187,68 +184,69 @@ __u32 sctp_generate_verification_tag(void);
void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
/* Prototypes for chunk-building functions. */
-struct sctp_chunk *sctp_make_init(const struct sctp_association *,
- const struct sctp_bind_addr *,
- gfp_t gfp, int vparam_len);
-struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *,
- const struct sctp_chunk *,
- const gfp_t gfp,
- const int unkparam_len);
-struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *,
- const struct sctp_chunk *);
-struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *,
- const struct sctp_chunk *);
-struct sctp_chunk *sctp_make_cwr(const struct sctp_association *,
+struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ const struct sctp_bind_addr *bp,
+ gfp_t gfp, int vparam_len);
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const gfp_t gfp, const int unkparam_len);
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
const __u32 lowest_tsn,
- const struct sctp_chunk *);
-struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *,
- const struct sctp_sndrcvinfo *sinfo,
- int len, const __u8 flags,
- __u16 ssn, gfp_t gfp);
-struct sctp_chunk *sctp_make_ecne(const struct sctp_association *,
- const __u32);
-struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, const __u8 flags,
+ __u16 ssn, gfp_t gfp);
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
+ const __u32 lowest_tsn);
+struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
- const struct sctp_chunk *);
-struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
- const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
-struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
- const struct sctp_chunk *,
- const size_t hint);
-struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
- const struct sctp_chunk *,
- __u32 tsn);
-struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
- struct msghdr *, size_t msg_len);
-struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
- const struct sctp_chunk *,
- const __u8 *,
- const size_t );
-struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *,
- const struct sctp_chunk *,
- struct sctp_paramhdr *);
-struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *,
- const struct sctp_chunk *);
-struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
- const struct sctp_transport *);
-struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *,
- const struct sctp_chunk *,
- const void *payload,
- const size_t paylen);
-struct sctp_chunk *sctp_make_op_error(const struct sctp_association *,
- const struct sctp_chunk *chunk,
- __be16 cause_code,
- const void *payload,
- size_t paylen,
- size_t reserve_tail);
-
-struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
- union sctp_addr *,
- struct sockaddr *,
- int, __be16);
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_complete(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const size_t hint);
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __u32 tsn);
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
+ struct msghdr *msg, size_t msg_len);
+struct sctp_chunk *sctp_make_abort_violation(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const __u8 *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_violation_paramlen(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_paramhdr *param);
+struct sctp_chunk *sctp_make_violation_max_retrans(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
+ const struct sctp_transport *transport);
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const void *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __be16 cause_code, const void *payload,
+ size_t paylen, size_t reserve_tail);
+
+struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
+ union sctp_addr *laddr,
+ struct sockaddr *addrs,
+ int addrcnt, __be16 flags);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
bool sctp_verify_asconf(const struct sctp_association *asoc,
@@ -262,27 +260,25 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
-struct sctp_chunk *sctp_make_strreset_req(
- const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
- bool out, bool in);
+struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
+ __u16 stream_num, __u16 *stream_list,
+ bool out, bool in);
struct sctp_chunk *sctp_make_strreset_tsnreq(
- const struct sctp_association *asoc);
+ const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_strreset_addstrm(
- const struct sctp_association *asoc,
- __u16 out, __u16 in);
-struct sctp_chunk *sctp_make_strreset_resp(
- const struct sctp_association *asoc,
- __u32 result, __u32 sn);
-struct sctp_chunk *sctp_make_strreset_tsnresp(
- struct sctp_association *asoc,
- __u32 result, __u32 sn,
- __u32 sender_tsn, __u32 receiver_tsn);
+ const struct sctp_association *asoc,
+ __u16 out, __u16 in);
+struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc,
+ __u32 result, __u32 sn);
+struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc,
+ __u32 result, __u32 sn,
+ __u32 sender_tsn,
+ __u32 receiver_tsn);
bool sctp_verify_reconf(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
struct sctp_paramhdr **errp);
-void sctp_chunk_assign_tsn(struct sctp_chunk *);
-void sctp_chunk_assign_ssn(struct sctp_chunk *);
+void sctp_chunk_assign_tsn(struct sctp_chunk *chunk);
+void sctp_chunk_assign_ssn(struct sctp_chunk *chunk);
/* Prototypes for stream-processing functions. */
struct sctp_chunk *sctp_process_strreset_outreq(
@@ -312,12 +308,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
/* Prototypes for statetable processing. */
-int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
- sctp_state_t state,
- struct sctp_endpoint *,
- struct sctp_association *asoc,
- void *event_arg,
- gfp_t gfp);
+int sctp_do_sm(struct net *net, enum sctp_event event_type,
+ union sctp_subtype subtype, enum sctp_state state,
+ struct sctp_endpoint *ep, struct sctp_association *asoc,
+ void *event_arg, gfp_t gfp);
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
@@ -327,11 +321,12 @@ void sctp_generate_proto_unreach_event(unsigned long peer);
void sctp_ootb_pkt_free(struct sctp_packet *packet);
-struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *ep,
- const struct sctp_association *asoc,
- struct sctp_chunk *chunk,
- gfp_t gfp, int *err,
- struct sctp_chunk **err_chk_p);
+struct sctp_association *sctp_unpack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp, int *err,
+ struct sctp_chunk **err_chk_p);
/* 3rd level prototypes */
__u32 sctp_generate_tag(const struct sctp_endpoint *ep);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5ab29af8ca8a..0477945de1a3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -150,18 +150,18 @@ extern struct sctp_globals {
#define sctp_checksum_disable (sctp_globals.checksum_disable)
/* SCTP Socket type: UDP or TCP style. */
-typedef enum {
+enum sctp_socket_type {
SCTP_SOCKET_UDP = 0,
SCTP_SOCKET_UDP_HIGH_BANDWIDTH,
SCTP_SOCKET_TCP
-} sctp_socket_type_t;
+};
/* Per socket SCTP information. */
struct sctp_sock {
/* inet_sock has to be the first member of sctp_sock */
struct inet_sock inet;
/* What kind of a socket is this? */
- sctp_socket_type_t type;
+ enum sctp_socket_type type;
/* PF_ family specific functions. */
struct sctp_pf *pf;
@@ -371,12 +371,12 @@ union sctp_params {
* chunk is sent and the destination transport address to which this
* HEARTBEAT is sent (see Section 8.3).
*/
-typedef struct sctp_sender_hb_info {
+struct sctp_sender_hb_info {
struct sctp_paramhdr param_hdr;
union sctp_addr daddr;
unsigned long sent_at;
__u64 hb_nonce;
-} sctp_sender_hb_info_t;
+};
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
gfp_t gfp);
@@ -449,7 +449,7 @@ struct sctp_af {
int (*addr_valid) (union sctp_addr *,
struct sctp_sock *,
const struct sk_buff *);
- sctp_scope_t (*scope) (union sctp_addr *);
+ enum sctp_scope (*scope)(union sctp_addr *);
void (*inaddr_any) (union sctp_addr *, __be16);
int (*is_any) (const union sctp_addr *);
int (*available) (union sctp_addr *,
@@ -657,8 +657,6 @@ struct sctp_sockaddr_entry {
#define SCTP_ADDRESS_TICK_DELAY 500
-typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);
-
/* This structure holds lists of chunks as we are assembling for
* transmission.
*/
@@ -697,10 +695,11 @@ struct sctp_packet {
void sctp_packet_init(struct sctp_packet *, struct sctp_transport *,
__u16 sport, __u16 dport);
void sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
-sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *,
- struct sctp_chunk *, int, gfp_t);
-sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *,
- struct sctp_chunk *);
+enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ int one_packet, gfp_t gfp);
+enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
int sctp_packet_transmit(struct sctp_packet *, gfp_t);
void sctp_packet_free(struct sctp_packet *);
@@ -950,7 +949,8 @@ int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
-void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
+void sctp_transport_lower_cwnd(struct sctp_transport *t,
+ enum sctp_lower_cwnd reason);
void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
@@ -1053,8 +1053,8 @@ int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
-void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
- sctp_retransmit_reason_t);
+void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
+ enum sctp_retransmit_reason reason);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
@@ -1110,7 +1110,7 @@ void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
void sctp_bind_addr_free(struct sctp_bind_addr *);
int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
- sctp_scope_t scope, gfp_t gfp,
+ enum sctp_scope scope, gfp_t gfp,
int flags);
int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
@@ -1134,17 +1134,18 @@ union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
__u16 port, gfp_t gfp);
-sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
+enum sctp_scope sctp_scope(const union sctp_addr *addr);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr,
+ const enum sctp_scope scope);
int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
int sctp_is_ep_boundall(struct sock *sk);
/* What type of endpoint? */
-typedef enum {
+enum sctp_endpoint_type {
SCTP_EP_TYPE_SOCKET,
SCTP_EP_TYPE_ASSOCIATION,
-} sctp_endpoint_type_t;
+};
/*
* A common base class to bridge the implementation view of a
@@ -1168,7 +1169,7 @@ struct sctp_ep_common {
int hashent;
/* Runtime type information. What kind of endpoint is this? */
- sctp_endpoint_type_t type;
+ enum sctp_endpoint_type type;
/* Some fields to help us manage this object.
* refcnt - Reference count access to this object.
@@ -1556,9 +1557,9 @@ struct sctp_association {
* and authenticated chunk list. All that is part of the
* cookie and these are just pointers to those locations
*/
- sctp_random_param_t *peer_random;
- sctp_chunks_param_t *peer_chunks;
- sctp_hmac_algo_param_t *peer_hmacs;
+ struct sctp_random_param *peer_random;
+ struct sctp_chunks_param *peer_chunks;
+ struct sctp_hmac_algo_param *peer_hmacs;
} peer;
/* State : A state variable indicating what state the
@@ -1574,7 +1575,7 @@ struct sctp_association {
*
* State takes values from SCTP_STATE_*.
*/
- sctp_state_t state;
+ enum sctp_state state;
/* Overall : The overall association error count.
* Error Count : [Clear this any time I get something.]
@@ -1924,8 +1925,8 @@ static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
struct sctp_association *
-sctp_association_new(const struct sctp_endpoint *, const struct sock *,
- sctp_scope_t scope, gfp_t gfp);
+sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp);
void sctp_association_free(struct sctp_association *);
void sctp_association_put(struct sctp_association *);
void sctp_association_hold(struct sctp_association *);
@@ -1945,9 +1946,10 @@ void sctp_assoc_del_peer(struct sctp_association *asoc,
const union sctp_addr *addr);
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer);
-void sctp_assoc_control_transport(struct sctp_association *,
- struct sctp_transport *,
- sctp_transport_cmd_t, sctp_sn_error_t);
+void sctp_assoc_control_transport(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ enum sctp_transport_cmd command,
+ sctp_sn_error_t error);
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
struct net *,
@@ -1966,8 +1968,8 @@ void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
struct sctp_transport *);
-int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
- sctp_scope_t, gfp_t);
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
+ enum sctp_scope scope, gfp_t gfp);
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
struct sctp_cookie*,
gfp_t gfp);
@@ -1983,16 +1985,16 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
/* A convenience structure to parse out SCTP specific CMSGs. */
-typedef struct sctp_cmsgs {
+struct sctp_cmsgs {
struct sctp_initmsg *init;
struct sctp_sndrcvinfo *srinfo;
struct sctp_sndinfo *sinfo;
-} sctp_cmsgs_t;
+};
/* Structure for tracking memory objects */
-typedef struct {
+struct sctp_dbg_objcnt_entry {
char *label;
atomic_t *counter;
-} sctp_dbg_objcnt_entry_t;
+};
#endif /* __sctp_structs_h__ */
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 4e0357517d79..099bad59dc90 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -56,7 +56,12 @@ extern int seg6_init(void);
extern void seg6_exit(void);
extern int seg6_iptunnel_init(void);
extern void seg6_iptunnel_exit(void);
+extern int seg6_local_init(void);
+extern void seg6_local_exit(void);
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
+extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
+ int proto);
+extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index f69c8c2782df..03a362568357 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -294,6 +294,7 @@ struct sock_common {
* @sk_stamp: time stamp of last packet received
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
+ * @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
* @sk_user_data: RPC layer private data
* @sk_frag: cached page frag
@@ -462,6 +463,7 @@ struct sock {
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
+ atomic_t sk_zckey;
struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -507,9 +509,7 @@ int sk_set_peek_off(struct sock *sk, int val);
static inline int sk_peek_offset(struct sock *sk, int flags)
{
if (unlikely(flags & MSG_PEEK)) {
- s32 off = READ_ONCE(sk->sk_peek_off);
- if (off >= 0)
- return off;
+ return READ_ONCE(sk->sk_peek_off);
}
return 0;
@@ -1128,7 +1128,7 @@ struct proto {
atomic_t socks;
#endif
int (*diag_destroy)(struct sock *sk, int err);
-};
+} __randomize_layout;
int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);
@@ -1531,6 +1531,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
+struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
+ gfp_t priority);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
@@ -1582,11 +1584,14 @@ int sock_no_shutdown(struct socket *, int);
int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
+ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
@@ -2363,6 +2368,16 @@ bool sk_net_capable(const struct sock *sk, int cap);
void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
+/* Take into consideration the size of the struct sk_buff overhead in the
+ * determination of these values, since that is non-constant across
+ * platforms. This makes socket queueing behavior and performance
+ * not depend upon such differences.
+ */
+#define _SK_MEM_PACKETS 256
+#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
+#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
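
As a rough worked example of the new defaults (illustrative only; the real per-skb overhead comes from SKB_TRUESIZE(256) and varies with architecture and config options):

    /* Illustrative arithmetic only: 768 stands in for SKB_TRUESIZE(256). */
    #define DEMO_SK_MEM_PACKETS   256
    #define DEMO_SK_MEM_OVERHEAD  768   /* assumed bytes charged per queued skb */
    #define DEMO_SK_WMEM_MAX (DEMO_SK_MEM_OVERHEAD * DEMO_SK_MEM_PACKETS) /* 196608 B = 192 KiB */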
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 0c28ad97c52f..7dc131d62ad5 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -18,26 +18,26 @@
#define STRP_STATS_INCR(stat) ((stat)++)
struct strp_stats {
- unsigned long long rx_msgs;
- unsigned long long rx_bytes;
- unsigned int rx_mem_fail;
- unsigned int rx_need_more_hdr;
- unsigned int rx_msg_too_big;
- unsigned int rx_msg_timeouts;
- unsigned int rx_bad_hdr_len;
+ unsigned long long msgs;
+ unsigned long long bytes;
+ unsigned int mem_fail;
+ unsigned int need_more_hdr;
+ unsigned int msg_too_big;
+ unsigned int msg_timeouts;
+ unsigned int bad_hdr_len;
};
struct strp_aggr_stats {
- unsigned long long rx_msgs;
- unsigned long long rx_bytes;
- unsigned int rx_mem_fail;
- unsigned int rx_need_more_hdr;
- unsigned int rx_msg_too_big;
- unsigned int rx_msg_timeouts;
- unsigned int rx_bad_hdr_len;
- unsigned int rx_aborts;
- unsigned int rx_interrupted;
- unsigned int rx_unrecov_intr;
+ unsigned long long msgs;
+ unsigned long long bytes;
+ unsigned int mem_fail;
+ unsigned int need_more_hdr;
+ unsigned int msg_too_big;
+ unsigned int msg_timeouts;
+ unsigned int bad_hdr_len;
+ unsigned int aborts;
+ unsigned int interrupted;
+ unsigned int unrecov_intr;
};
struct strparser;
@@ -48,16 +48,18 @@ struct strp_callbacks {
void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
int (*read_sock_done)(struct strparser *strp, int err);
void (*abort_parser)(struct strparser *strp, int err);
+ void (*lock)(struct strparser *strp);
+ void (*unlock)(struct strparser *strp);
};
-struct strp_rx_msg {
+struct strp_msg {
int full_len;
int offset;
};
-static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
+static inline struct strp_msg *strp_msg(struct sk_buff *skb)
{
- return (struct strp_rx_msg *)((void *)skb->cb +
+ return (struct strp_msg *)((void *)skb->cb +
offsetof(struct qdisc_skb_cb, data));
}
@@ -65,18 +67,18 @@ static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
struct strparser {
struct sock *sk;
- u32 rx_stopped : 1;
- u32 rx_paused : 1;
- u32 rx_aborted : 1;
- u32 rx_interrupted : 1;
- u32 rx_unrecov_intr : 1;
-
- struct sk_buff **rx_skb_nextp;
- struct timer_list rx_msg_timer;
- struct sk_buff *rx_skb_head;
- unsigned int rx_need_bytes;
- struct delayed_work rx_delayed_work;
- struct work_struct rx_work;
+ u32 stopped : 1;
+ u32 paused : 1;
+ u32 aborted : 1;
+ u32 interrupted : 1;
+ u32 unrecov_intr : 1;
+
+ struct sk_buff **skb_nextp;
+ struct timer_list msg_timer;
+ struct sk_buff *skb_head;
+ unsigned int need_bytes;
+ struct delayed_work delayed_work;
+ struct work_struct work;
struct strp_stats stats;
struct strp_callbacks cb;
};
@@ -84,7 +86,7 @@ struct strparser {
/* Must be called with lock held for attached socket */
static inline void strp_pause(struct strparser *strp)
{
- strp->rx_paused = 1;
+ strp->paused = 1;
}
/* May be called without holding lock for attached socket */
@@ -97,37 +99,37 @@ static inline void save_strp_stats(struct strparser *strp,
#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
strp->stats._stat)
- SAVE_PSOCK_STATS(rx_msgs);
- SAVE_PSOCK_STATS(rx_bytes);
- SAVE_PSOCK_STATS(rx_mem_fail);
- SAVE_PSOCK_STATS(rx_need_more_hdr);
- SAVE_PSOCK_STATS(rx_msg_too_big);
- SAVE_PSOCK_STATS(rx_msg_timeouts);
- SAVE_PSOCK_STATS(rx_bad_hdr_len);
+ SAVE_PSOCK_STATS(msgs);
+ SAVE_PSOCK_STATS(bytes);
+ SAVE_PSOCK_STATS(mem_fail);
+ SAVE_PSOCK_STATS(need_more_hdr);
+ SAVE_PSOCK_STATS(msg_too_big);
+ SAVE_PSOCK_STATS(msg_timeouts);
+ SAVE_PSOCK_STATS(bad_hdr_len);
#undef SAVE_PSOCK_STATS
- if (strp->rx_aborted)
- agg_stats->rx_aborts++;
- if (strp->rx_interrupted)
- agg_stats->rx_interrupted++;
- if (strp->rx_unrecov_intr)
- agg_stats->rx_unrecov_intr++;
+ if (strp->aborted)
+ agg_stats->aborts++;
+ if (strp->interrupted)
+ agg_stats->interrupted++;
+ if (strp->unrecov_intr)
+ agg_stats->unrecov_intr++;
}
static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
struct strp_aggr_stats *agg_stats)
{
#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
- SAVE_PSOCK_STATS(rx_msgs);
- SAVE_PSOCK_STATS(rx_bytes);
- SAVE_PSOCK_STATS(rx_mem_fail);
- SAVE_PSOCK_STATS(rx_need_more_hdr);
- SAVE_PSOCK_STATS(rx_msg_too_big);
- SAVE_PSOCK_STATS(rx_msg_timeouts);
- SAVE_PSOCK_STATS(rx_bad_hdr_len);
- SAVE_PSOCK_STATS(rx_aborts);
- SAVE_PSOCK_STATS(rx_interrupted);
- SAVE_PSOCK_STATS(rx_unrecov_intr);
+ SAVE_PSOCK_STATS(msgs);
+ SAVE_PSOCK_STATS(bytes);
+ SAVE_PSOCK_STATS(mem_fail);
+ SAVE_PSOCK_STATS(need_more_hdr);
+ SAVE_PSOCK_STATS(msg_too_big);
+ SAVE_PSOCK_STATS(msg_timeouts);
+ SAVE_PSOCK_STATS(bad_hdr_len);
+ SAVE_PSOCK_STATS(aborts);
+ SAVE_PSOCK_STATS(interrupted);
+ SAVE_PSOCK_STATS(unrecov_intr);
#undef SAVE_PSOCK_STATS
}
@@ -135,8 +137,11 @@ static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
void strp_done(struct strparser *strp);
void strp_stop(struct strparser *strp);
void strp_check_rcv(struct strparser *strp);
-int strp_init(struct strparser *strp, struct sock *csk,
- struct strp_callbacks *cb);
+int strp_init(struct strparser *strp, struct sock *sk,
+ const struct strp_callbacks *cb);
void strp_data_ready(struct strparser *strp);
+int strp_process(struct strparser *strp, struct sk_buff *orig_skb,
+ unsigned int orig_offset, size_t orig_len,
+ size_t max_msg_size, long timeo);
#endif /* __NET_STRPARSER_H_ */
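
With the const-qualified callback table and the new lock/unlock hooks, wiring up a strparser looks roughly like the sketch below. Only rcv_msg, lock and unlock appear in the hunk above; parse_msg is part of the existing callback set, and all demo_* bodies are hypothetical placeholders for a socket-attached parser.

    /* All demo_* names are hypothetical. */
    static int demo_parse_msg(struct strparser *strp, struct sk_buff *skb)
    {
            /* Toy framing: the first byte of the message is its total length
             * (assumed to sit in the linear area). Returning 0 asks for more data.
             */
            return skb->data[strp_msg(skb)->offset];
    }

    static void demo_rcv_msg(struct strparser *strp, struct sk_buff *skb)
    {
            struct strp_msg *msg = strp_msg(skb);   /* renamed from strp_rx_msg() */

            pr_info("message of %d bytes at offset %d\n", msg->full_len, msg->offset);
            kfree_skb(skb);                         /* rcv_msg consumes the skb */
    }

    static void demo_lock(struct strparser *strp)
    {
            lock_sock(strp->sk);
    }

    static void demo_unlock(struct strparser *strp)
    {
            release_sock(strp->sk);
    }

    static const struct strp_callbacks demo_cb = {  /* may now be const */
            .rcv_msg   = demo_rcv_msg,
            .parse_msg = demo_parse_msg,
            .lock      = demo_lock,
            .unlock    = demo_unlock,
    };

    /* Later: strp_init(&strp, sk, &demo_cb); pause with strp_pause(),
     * tear down with strp_done().
     */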
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 8ae9e3b6392e..d767b7991887 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -74,7 +74,6 @@ struct switchdev_attr {
enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_UNDEFINED,
SWITCHDEV_OBJ_ID_PORT_VLAN,
- SWITCHDEV_OBJ_ID_PORT_FDB,
SWITCHDEV_OBJ_ID_PORT_MDB,
};
@@ -97,17 +96,6 @@ struct switchdev_obj_port_vlan {
#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
container_of(obj, struct switchdev_obj_port_vlan, obj)
-/* SWITCHDEV_OBJ_ID_PORT_FDB */
-struct switchdev_obj_port_fdb {
- struct switchdev_obj obj;
- unsigned char addr[ETH_ALEN];
- u16 vid;
- u16 ndm_state;
-};
-
-#define SWITCHDEV_OBJ_PORT_FDB(obj) \
- container_of(obj, struct switchdev_obj_port_fdb, obj)
-
/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
struct switchdev_obj obj;
@@ -135,8 +123,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
* @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
*
* @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
- *
- * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*).
*/
struct switchdev_ops {
int (*switchdev_port_attr_get)(struct net_device *dev,
@@ -149,9 +135,6 @@ struct switchdev_ops {
struct switchdev_trans *trans);
int (*switchdev_port_obj_del)(struct net_device *dev,
const struct switchdev_obj *obj);
- int (*switchdev_port_obj_dump)(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb);
};
enum switchdev_notifier_type {
@@ -189,28 +172,10 @@ int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj);
int switchdev_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj);
-int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb);
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info);
-int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask,
- int nlflags);
-int switchdev_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int switchdev_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 nlm_flags);
-int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid);
-int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int *idx);
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -249,13 +214,6 @@ static inline int switchdev_port_obj_del(struct net_device *dev,
return -EOPNOTSUPP;
}
-static inline int switchdev_port_obj_dump(struct net_device *dev,
- const struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
-{
- return -EOPNOTSUPP;
-}
-
static inline int register_switchdev_notifier(struct notifier_block *nb)
{
return 0;
@@ -273,51 +231,6 @@ static inline int call_switchdev_notifiers(unsigned long val,
return NOTIFY_DONE;
}
-static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
- u32 seq, struct net_device *dev,
- u32 filter_mask, int nlflags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int switchdev_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int switchdev_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr,
- u16 vid, u16 nlm_flags)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int *idx)
-{
- return *idx;
-}
-
static inline bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index d576374c4d6f..41afe1ce7b16 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -15,7 +15,8 @@ struct tcf_gact {
};
#define to_gact(a) ((struct tcf_gact *)a)
-static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
+static inline bool __is_tcf_gact_act(const struct tc_action *a, int act,
+ bool is_ext)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_gact *gact;
@@ -24,7 +25,8 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
return false;
gact = to_gact(a);
- if (gact->tcf_action == act)
+ if ((!is_ext && gact->tcf_action == act) ||
+ (is_ext && TC_ACT_EXT_CMP(gact->tcf_action, act)))
return true;
#endif
@@ -33,12 +35,22 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act)
static inline bool is_tcf_gact_shot(const struct tc_action *a)
{
- return __is_tcf_gact_act(a, TC_ACT_SHOT);
+ return __is_tcf_gact_act(a, TC_ACT_SHOT, false);
}
static inline bool is_tcf_gact_trap(const struct tc_action *a)
{
- return __is_tcf_gact_act(a, TC_ACT_TRAP);
+ return __is_tcf_gact_act(a, TC_ACT_TRAP, false);
+}
+
+static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
+{
+ return __is_tcf_gact_act(a, TC_ACT_GOTO_CHAIN, true);
+}
+
+static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
+{
+ return a->goto_chain->index;
}
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70483296157f..b510f284427a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -139,6 +139,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#endif
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
+#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
@@ -150,8 +151,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
-#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */
-
#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL (75*HZ)
@@ -257,7 +256,6 @@ extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_frto;
-extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
@@ -352,8 +350,11 @@ int tcp_v4_rcv(struct sk_buff *skb);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
+int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
void tcp_release_cb(struct sock *sk);
@@ -363,7 +364,7 @@ void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
+ const struct tcphdr *th);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
@@ -796,6 +797,12 @@ struct tcp_skb_cb {
u16 tcp_gso_segs;
u16 tcp_gso_size;
};
+
+ /* Used to stash the receive timestamp while this skb is in the
+ * out of order queue, as skb->tstamp is overwritten by the
+ * rbnode.
+ */
+ ktime_t swtstamp;
};
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
@@ -812,7 +819,8 @@ struct tcp_skb_cb {
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
__u8 txstamp_ack:1, /* Record TX timestamp for ack? */
eor:1, /* Is skb MSG_EOR marked? */
- unused:6;
+ has_rxtstamp:1, /* SKB has a RX timestamp */
+ unused:5;
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
@@ -849,6 +857,16 @@ static inline int tcp_v6_iif(const struct sk_buff *skb)
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
+
+/* TCP_SKB_CB reference means this can not be used from early demux */
+static inline int tcp_v6_sdif(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
+ return TCP_SKB_CB(skb)->header.h6.iif;
+#endif
+ return 0;
+}
#endif
/* TCP_SKB_CB reference means this can not be used from early demux */
@@ -862,6 +880,16 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
return false;
}
+/* TCP_SKB_CB reference means this can not be used from early demux */
+static inline int tcp_v4_sdif(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ return TCP_SKB_CB(skb)->header.h4.iif;
+#endif
+ return 0;
+}
+
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
@@ -1004,9 +1032,7 @@ void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load);
-void tcp_reinit_congestion_control(struct sock *sk,
- const struct tcp_congestion_ops *ca);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1245,17 +1271,6 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
__tcp_checksum_complete(skb);
}
-/* Prequeue for VJ style copy to user, combined with checksumming. */
-
-static inline void tcp_prequeue_init(struct tcp_sock *tp)
-{
- tp->ucopy.task = NULL;
- tp->ucopy.len = 0;
- tp->ucopy.memory = 0;
- skb_queue_head_init(&tp->ucopy.prequeue);
-}
-
-bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
@@ -1547,8 +1562,7 @@ int tcp_fastopen_reset_cipher(void *key, unsigned int len);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct tcp_fastopen_cookie *foc,
- struct dst_entry *dst);
+ struct tcp_fastopen_cookie *foc);
void tcp_fastopen_init_key_once(bool publish);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
struct tcp_fastopen_cookie *cookie);
@@ -1916,10 +1930,21 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
+/* At how many usecs into the future should the RTO fire? */
+static inline s64 tcp_rto_delta_us(const struct sock *sk)
+{
+ const struct sk_buff *skb = tcp_write_queue_head(sk);
+ u32 rto = inet_csk(sk)->icsk_rto;
+ u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+}
+
/*
* Save and compile IPv4 options, return a pointer to it
*/
-static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
+static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
+ struct sk_buff *skb)
{
const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
struct ip_options_rcu *dopt = NULL;
@@ -1928,7 +1953,7 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
int opt_size = sizeof(*dopt) + opt->optlen;
dopt = kmalloc(opt_size, GFP_ATOMIC);
- if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
+ if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
kfree(dopt);
dopt = NULL;
}
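
tcp_rto_delta_us() above is plain microsecond arithmetic; a worked example with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t send_time_us = 5000000;  /* head skb left the stack at t = 5 s */
            uint64_t rto_us       = 200000;   /* icsk_rto of 200 ms, in usecs */
            uint64_t now_us       = 5150000;  /* tp->tcp_mstamp reads 5.15 s */
            int64_t delta_us      = (int64_t)(send_time_us + rto_us - now_us);

            /* 5.0 s + 0.2 s - 5.15 s = 50 ms: the RTO should fire in 50,000 us.
             * A negative result would mean the timeout is already overdue.
             */
            printf("RTO fires in %lld us\n", (long long)delta_us);
            return 0;
    }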
diff --git a/include/net/tso.h b/include/net/tso.h
index b7be852bfe9d..9a56c39e6d0a 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -3,6 +3,8 @@
#include <net/ip.h>
+#define TSO_HEADER_SIZE 128
+
struct tso_t {
int next_frag_idx;
void *data;
diff --git a/include/net/tun_proto.h b/include/net/tun_proto.h
new file mode 100644
index 000000000000..2ea3deba4c99
--- /dev/null
+++ b/include/net/tun_proto.h
@@ -0,0 +1,49 @@
+#ifndef __NET_TUN_PROTO_H
+#define __NET_TUN_PROTO_H
+
+#include <linux/kernel.h>
+
+/* One byte protocol values as defined by VXLAN-GPE and NSH. These will
+ * hopefully get a shared IANA registry.
+ */
+#define TUN_P_IPV4 0x01
+#define TUN_P_IPV6 0x02
+#define TUN_P_ETHERNET 0x03
+#define TUN_P_NSH 0x04
+#define TUN_P_MPLS_UC 0x05
+
+static inline __be16 tun_p_to_eth_p(u8 proto)
+{
+ switch (proto) {
+ case TUN_P_IPV4:
+ return htons(ETH_P_IP);
+ case TUN_P_IPV6:
+ return htons(ETH_P_IPV6);
+ case TUN_P_ETHERNET:
+ return htons(ETH_P_TEB);
+ case TUN_P_NSH:
+ return htons(ETH_P_NSH);
+ case TUN_P_MPLS_UC:
+ return htons(ETH_P_MPLS_UC);
+ }
+ return 0;
+}
+
+static inline u8 tun_p_from_eth_p(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ return TUN_P_IPV4;
+ case htons(ETH_P_IPV6):
+ return TUN_P_IPV6;
+ case htons(ETH_P_TEB):
+ return TUN_P_ETHERNET;
+ case htons(ETH_P_NSH):
+ return TUN_P_NSH;
+ case htons(ETH_P_MPLS_UC):
+ return TUN_P_MPLS_UC;
+ }
+ return 0;
+}
+
+#endif
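
Both helpers in the new header return 0 for values they do not know, so callers are expected to treat 0 as "unsupported". A kernel-style sketch of a hypothetical VXLAN-GPE receive step using tun_p_to_eth_p() (the function name and error choice are illustrative):

    /* Hypothetical receive step for a VXLAN-GPE style tunnel. */
    static int demo_gpe_set_protocol(struct sk_buff *skb, u8 next_protocol)
    {
            __be16 protocol = tun_p_to_eth_p(next_protocol);

            if (!protocol)
                    return -EPFNOSUPPORT;   /* unknown one-byte protocol value */

            skb->protocol = protocol;
            return 0;
    }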
diff --git a/include/net/udp.h b/include/net/udp.h
index 972ce4baab6b..12dfbfe2e2d7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -260,6 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
}
void udp_v4_early_demux(struct sk_buff *skb);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
@@ -286,7 +287,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport, int dif,
+ __be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport);
@@ -297,7 +298,7 @@ struct sock *udp6_lib_lookup(struct net *net,
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
- int dif, struct udp_table *tbl,
+ int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport);
@@ -305,33 +306,44 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
* possible multiple cache misses on dequeue()
*/
-#if BITS_PER_LONG == 64
-
-/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
- * cold cache lines at recvmsg time.
- * skb->len can be stored on 16 bits since the udp header has been already
- * validated and pulled.
- */
struct udp_dev_scratch {
- u32 truesize;
+ /* skb->truesize and the stateless bit are embedded in a single field;
+ * do not use a bitfield since the compiler emits better/smaller code
+ * this way
+ */
+ u32 _tsize_state;
+
+#if BITS_PER_LONG == 64
+ /* len and the bit needed to compute skb_csum_unnecessary
+ * will be on cold cache lines at recvmsg time.
+ * skb->len can be stored on 16 bits since the udp header has been
+ * already validated and pulled.
+ */
u16 len;
bool is_linear;
bool csum_unnecessary;
+#endif
};
+static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
+{
+ return (struct udp_dev_scratch *)&skb->dev_scratch;
+}
+
+#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+ return udp_skb_scratch(skb)->len;
}
static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+ return udp_skb_scratch(skb)->csum_unnecessary;
}
static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+ return udp_skb_scratch(skb)->is_linear;
}
#else
@@ -354,12 +366,13 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb)
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
struct iov_iter *to)
{
- int n, copy = len - off;
+ int n;
- n = copy_to_iter(skb->data + off, copy, to);
- if (n == copy)
+ n = copy_to_iter(skb->data + off, len, to);
+ if (n == len)
return 0;
+ iov_iter_revert(to, n);
return -EFAULT;
}
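
The copy_linear_skb() change above makes the copy all-or-nothing from the iterator's point of view: on a short copy the iov_iter is rewound before -EFAULT is returned, so the caller sees it exactly where it started. A minimal sketch of the same pattern, with a hypothetical helper name:

    /* Hypothetical helper: copy exactly @len bytes or leave @to untouched. */
    static int demo_copy_all_or_nothing(const void *src, size_t len,
                                        struct iov_iter *to)
    {
            size_t n = copy_to_iter(src, len, to);

            if (n == len)
                    return 0;

            iov_iter_revert(to, n);         /* undo the partial advance */
            return -EFAULT;
    }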
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 02c5be037451..10cce0dd4450 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -115,6 +115,8 @@ struct udp_tunnel_info {
/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
unsigned short type);
+void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
+ unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
@@ -124,6 +126,12 @@ static inline void udp_tunnel_get_rx_info(struct net_device *dev)
call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
+
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
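udp_tunnel_drop_rx_info() is the new mirror of udp_tunnel_get_rx_info(): instead of replaying all offloaded tunnel ports it withdraws them. A hedged sketch of how a NIC driver might use the pair; the driver callbacks are hypothetical.

#include <net/udp_tunnel.h>

static int example_ndo_open(struct net_device *dev)
{
	/* Under RTNL: ask every tunnel socket to re-announce its port,
	 * which reaches the driver via ndo_udp_tunnel_add().
	 */
	udp_tunnel_get_rx_info(dev);
	return 0;
}

static int example_ndo_stop(struct net_device *dev)
{
	/* New with this change: flush all offloaded ports through
	 * ndo_udp_tunnel_del() in one call.
	 */
	udp_tunnel_drop_rx_info(dev);
	return 0;
}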
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 3f430e38ab82..4e3876dde295 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -168,12 +168,6 @@ reserved_flags2:2;
#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \
cpu_to_be32(0xff))
-/* VXLAN-GPE header Next Protocol. */
-#define VXLAN_GPE_NP_IPV4 0x01
-#define VXLAN_GPE_NP_IPV6 0x02
-#define VXLAN_GPE_NP_ETHERNET 0x03
-#define VXLAN_GPE_NP_NSH 0x04
-
struct vxlan_metadata {
u32 gbp;
};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index c0916ab18d32..f002a2c5e33c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -43,6 +43,8 @@
MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
#define MODULE_ALIAS_XFRM_TYPE(family, proto) \
MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
+#define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
+ MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
@@ -163,6 +165,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
+ u32 output_mark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -296,10 +299,12 @@ struct xfrm_policy_afinfo {
struct dst_entry *(*dst_lookup)(struct net *net,
int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr);
+ const xfrm_address_t *daddr,
+ u32 mark);
int (*get_saddr)(struct net *net, int oif,
xfrm_address_t *saddr,
- xfrm_address_t *daddr);
+ xfrm_address_t *daddr,
+ u32 mark);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
@@ -317,6 +322,7 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
+void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -563,7 +569,6 @@ struct xfrm_policy {
refcount_t refcnt;
struct timer_list timer;
- struct flow_cache_object flo;
atomic_t genid;
u32 priority;
u32 index;
@@ -978,7 +983,6 @@ struct xfrm_dst {
struct rt6_info rt6;
} u;
struct dst_entry *route;
- struct flow_cache_object flo;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
u32 xfrm_genid;
@@ -1015,6 +1019,7 @@ struct xfrm_offload {
#define CRYPTO_FALLBACK 8
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
+#define XFRM_ESP_NO_TRAILER 64
__u32 status;
#define CRYPTO_SUCCESS 1
@@ -1226,9 +1231,6 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
}
}
-void xfrm_garbage_collect(struct net *net);
-void xfrm_garbage_collect_deferred(struct net *net);
-
#else
static inline void xfrm_sk_free_policy(struct sock *sk) {}
@@ -1263,9 +1265,6 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
{
return 1;
}
-static inline void xfrm_garbage_collect(struct net *net)
-{
-}
#endif
static __inline__
@@ -1565,7 +1564,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x);
int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
@@ -1645,7 +1644,7 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
- int family);
+ int family, u32 mark);
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
@@ -1863,6 +1862,20 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+{
+ struct xfrm_state *x = dst->xfrm;
+
+ if (!x || !x->type_offload)
+ return false;
+
+ if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
+ !dst->child->xfrm)
+ return true;
+
+ return false;
+}
+
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
struct xfrm_state_offload *xso = &x->xso;
@@ -1905,6 +1918,11 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x
{
return false;
}
+
+static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
+{
+ return false;
+}
#endif
static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
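xfrm_dst_offload_ok(), added above for both the offload and stub configurations, answers a different question than xfrm_dev_offload_ok(): the former checks the route/state setup, the latter the individual packet. A small sketch, assuming a hypothetical driver transmit helper, of how the two compose:

#include <net/xfrm.h>

static bool example_can_hw_encrypt(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	/* Route-level check: an offloaded state bound to the right device
	 * and no further (nested) transformation below it.
	 */
	if (!dst || !xfrm_dst_offload_ok(dst))
		return false;

	/* Packet-level check against the same state. */
	return xfrm_dev_offload_ok(skb, dst->xfrm);
}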
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 4b34c51f859e..ec5008cf5d51 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -172,7 +172,8 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
(struct in6_addr *)gid);
break;
case AF_INET6:
- memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16);
+ *(struct in6_addr *)&gid->raw =
+ ((struct sockaddr_in6 *)addr)->sin6_addr;
break;
default:
return -EINVAL;
@@ -205,11 +206,13 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
if (dev) {
ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
+ if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
(struct in6_addr *)gid);
+
+ if (ip4)
in_dev_put(ip4);
- }
+
dev_put(dev);
}
}
@@ -302,7 +305,13 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
static inline int rdma_is_multicast_addr(struct in6_addr *addr)
{
- return addr->s6_addr[0] == 0xff;
+ u32 ipv4_addr;
+
+ if (addr->s6_addr[0] == 0xff)
+ return 1;
+
+ memcpy(&ipv4_addr, addr->s6_addr + 12, 4);
+ return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr));
}
static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac)
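With the change above, rdma_is_multicast_addr() also recognizes IPv4 multicast carried in a v4-mapped IPv6 address. A small self-check sketch; the function is illustrative only.

#include <rdma/ib_addr.h>

static void example_mcast_checks(void)
{
	struct in6_addr v6 = { };
	struct in6_addr v4mapped;

	v6.s6_addr[0] = 0xff;	/* native IPv6 multicast, ff00::/8 */

	/* ::ffff:224.0.0.1, i.e. a v4-mapped IPv4 multicast address */
	ipv6_addr_set_v4mapped(cpu_to_be32(0xe0000001), &v4mapped);

	WARN_ON(!rdma_is_multicast_addr(&v6));
	WARN_ON(!rdma_is_multicast_addr(&v4mapped));
}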
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 5519f31f043a..c124d515f7d5 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -193,8 +193,12 @@ static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth)
#define IB_LNH_MASK 3
#define IB_SC_MASK 0xf
#define IB_SC_SHIFT 12
+#define IB_SC5_MASK 0x10
#define IB_SL_MASK 0xf
#define IB_SL_SHIFT 4
+#define IB_LVER_MASK 0xf
+#define IB_LVER_SHIFT 8
static inline u8 ib_get_lnh(struct ib_header *hdr)
{
@@ -206,6 +210,11 @@ static inline u8 ib_get_sc(struct ib_header *hdr)
return ((be16_to_cpu(hdr->lrh[0]) >> IB_SC_SHIFT) & IB_SC_MASK);
}
+static inline bool ib_is_sc5(u16 sc5)
+{
+ return !!(sc5 & IB_SC5_MASK);
+}
+
static inline u8 ib_get_sl(struct ib_header *hdr)
{
return ((be16_to_cpu(hdr->lrh[0]) >> IB_SL_SHIFT) & IB_SL_MASK);
@@ -221,6 +230,27 @@ static inline u16 ib_get_slid(struct ib_header *hdr)
return (be16_to_cpu(hdr->lrh[3]));
}
+static inline u8 ib_get_lver(struct ib_header *hdr)
+{
+ return (u8)((be16_to_cpu(hdr->lrh[0]) >> IB_LVER_SHIFT) &
+ IB_LVER_MASK);
+}
+
+static inline u16 ib_get_len(struct ib_header *hdr)
+{
+ return (u16)(be16_to_cpu(hdr->lrh[2]));
+}
+
+static inline u32 ib_get_qkey(struct ib_other_headers *ohdr)
+{
+ return be32_to_cpu(ohdr->u.ud.deth[0]);
+}
+
+static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr)
+{
+ return ((be32_to_cpu(ohdr->u.ud.deth[1])) & IB_QPN_MASK);
+}
+
/*
* BTH
*/
@@ -229,6 +259,14 @@ static inline u16 ib_get_slid(struct ib_header *hdr)
#define IB_BTH_PAD_MASK 3
#define IB_BTH_PKEY_MASK 0xffff
#define IB_BTH_PAD_SHIFT 20
+#define IB_BTH_A_MASK 1
+#define IB_BTH_A_SHIFT 31
+#define IB_BTH_M_MASK 1
+#define IB_BTH_M_SHIFT 22
+#define IB_BTH_SE_MASK 1
+#define IB_BTH_SE_SHIFT 23
+#define IB_BTH_TVER_MASK 0xf
+#define IB_BTH_TVER_SHIFT 16
static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
{
@@ -247,4 +285,50 @@ static inline u8 ib_bth_get_opcode(struct ib_other_headers *ohdr)
IB_BTH_OPCODE_MASK);
}
+static inline u8 ib_bth_get_ackreq(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[2]) >> IB_BTH_A_SHIFT) &
+ IB_BTH_A_MASK);
+}
+
+static inline u8 ib_bth_get_migreq(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_M_SHIFT) &
+ IB_BTH_M_MASK);
+}
+
+static inline u8 ib_bth_get_se(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_SE_SHIFT) &
+ IB_BTH_SE_MASK);
+}
+
+static inline u32 ib_bth_get_psn(struct ib_other_headers *ohdr)
+{
+ return (u32)(be32_to_cpu(ohdr->bth[2]));
+}
+
+static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr)
+{
+ return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK);
+}
+
+static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) &
+ IB_BECN_MASK);
+}
+
+static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) &
+ IB_FECN_MASK);
+}
+
+static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_TVER_SHIFT) &
+ IB_BTH_TVER_MASK);
+}
+
#endif /* IB_HDRS_H */
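The new accessors keep the shift-and-mask arithmetic for the LRH, BTH and DETH words in one place. A hypothetical receive-path snippet using a few of them:

#include <rdma/ib_hdrs.h>

static void example_parse(struct ib_header *hdr,
			  struct ib_other_headers *ohdr)
{
	u8 lver = ib_get_lver(hdr);		/* link version, LRH word 0 */
	u16 pktlen = ib_get_len(hdr);		/* packet length, LRH word 2 */
	u32 qpn = ib_bth_get_qpn(ohdr);		/* destination QPN, masked */
	u32 psn = ib_bth_get_psn(ohdr) & 0xffffff; /* low 24 bits carry the PSN */
	u8 ackreq = ib_bth_get_ackreq(ohdr);

	pr_debug("lver %u len %u qpn 0x%x psn 0x%x ackreq %u\n",
		 lver, pktlen, qpn, psn, ackreq);
}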
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 68cef3bd50fb..8ebf84ae9ed1 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -38,10 +38,12 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_sa.h>
-void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
+void ib_copy_qp_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src);
-void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+void ib_copy_ah_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_ah_attr *dst,
struct rdma_ah_attr *src);
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 356953d3dbd1..e6df68048517 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -64,6 +64,8 @@
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
+
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
@@ -168,7 +170,7 @@ enum ib_device_cap_flags {
IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
- IB_DEVICE_INIT_TYPE = (1 << 9),
+ /* Not in use, former INIT_TYPE = (1 << 9),*/
IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
@@ -183,7 +185,7 @@ enum ib_device_cap_flags {
* which will always contain a usable lkey.
*/
IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
- IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
+ /* Reserved, old SEND_W_INV = (1 << 16),*/
IB_DEVICE_MEM_WINDOW = (1 << 17),
/*
* Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -218,7 +220,7 @@ enum ib_device_cap_flags {
* of I/O operations with single completion queue managed
* by hardware.
*/
- IB_DEVICE_CROSS_CHANNEL = (1 << 27),
+ IB_DEVICE_CROSS_CHANNEL = (1 << 27),
IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
@@ -278,6 +280,24 @@ struct ib_rss_caps {
u32 max_rwq_indirection_table_size;
};
+enum ib_tm_cap_flags {
+ /* Support tag matching on RC transport */
+ IB_TM_CAP_RC = 1 << 0,
+};
+
+struct ib_xrq_caps {
+ /* Max size of RNDV header */
+ u32 max_rndv_hdr_size;
+ /* Max number of entries in tag matching list */
+ u32 max_num_tags;
+ /* From enum ib_tm_cap_flags */
+ u32 flags;
+ /* Max number of outstanding list operations */
+ u32 max_ops;
+ /* Max number of SGE in tag matching entry */
+ u32 max_sge;
+};
+
enum ib_cq_creation_flags {
IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
@@ -338,6 +358,7 @@ struct ib_device_attr {
struct ib_rss_caps rss_caps;
u32 max_wq_type_rq;
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
+ struct ib_xrq_caps xrq_caps;
};
enum ib_mtu {
@@ -549,8 +570,8 @@ struct ib_port_attr {
u32 bad_pkey_cntr;
u32 qkey_viol_cntr;
u16 pkey_tbl_len;
- u16 lid;
- u16 sm_lid;
+ u32 sm_lid;
+ u32 lid;
u8 lmc;
u8 max_vl_num;
u8 sm_sl;
@@ -577,7 +598,8 @@ struct ib_device_modify {
enum ib_port_modify_flags {
IB_PORT_SHUTDOWN = 1,
IB_PORT_INIT_TYPE = (1<<2),
- IB_PORT_RESET_QKEY_CNTR = (1<<3)
+ IB_PORT_RESET_QKEY_CNTR = (1<<3),
+ IB_PORT_OPA_MASK_CHG = (1<<4)
};
struct ib_port_modify {
@@ -664,6 +686,8 @@ union rdma_network_hdr {
};
};
+#define IB_QPN_MASK 0xFFFFFF
+
enum {
IB_MULTICAST_QPN = 0xffffff
};
@@ -859,6 +883,7 @@ struct roce_ah_attr {
struct opa_ah_attr {
u32 dlid;
u8 src_path_bits;
+ bool make_grd;
};
struct rdma_ah_attr {
@@ -948,7 +973,7 @@ struct ib_wc {
u32 src_qp;
int wc_flags;
u16 pkey_index;
- u16 slid;
+ u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -966,9 +991,16 @@ enum ib_cq_notify_flags {
enum ib_srq_type {
IB_SRQT_BASIC,
- IB_SRQT_XRC
+ IB_SRQT_XRC,
+ IB_SRQT_TM,
};
+static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
+{
+ return srq_type == IB_SRQT_XRC ||
+ srq_type == IB_SRQT_TM;
+}
+
enum ib_srq_attr_mask {
IB_SRQ_MAX_WR = 1 << 0,
IB_SRQ_LIMIT = 1 << 1,
@@ -986,11 +1018,17 @@ struct ib_srq_init_attr {
struct ib_srq_attr attr;
enum ib_srq_type srq_type;
- union {
- struct {
- struct ib_xrcd *xrcd;
- struct ib_cq *cq;
- } xrc;
+ struct {
+ struct ib_cq *cq;
+ union {
+ struct {
+ struct ib_xrcd *xrcd;
+ } xrc;
+
+ struct {
+ u32 max_num_tags;
+ } tag_matching;
+ };
} ext;
};
@@ -1056,9 +1094,10 @@ enum ib_qp_create_flags {
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
- IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+ /* FREE = 1 << 7, */
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_QP_CREATE_SOURCE_QPN = 1 << 10,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1086,6 +1125,7 @@ struct ib_qp_init_attr {
*/
u8 port_num;
struct ib_rwq_ind_table *rwq_ind_tbl;
+ u32 source_qpn;
};
struct ib_qp_open_attr {
@@ -1527,12 +1567,14 @@ struct ib_srq {
enum ib_srq_type srq_type;
atomic_t usecnt;
- union {
- struct {
- struct ib_xrcd *xrcd;
- struct ib_cq *cq;
- u32 srq_num;
- } xrc;
+ struct {
+ struct ib_cq *cq;
+ union {
+ struct {
+ struct ib_xrcd *xrcd;
+ u32 srq_num;
+ } xrc;
+ };
} ext;
};
@@ -1546,6 +1588,10 @@ enum ib_raw_packet_caps {
IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
/* Checksum offloads are supported (for both send and receive). */
IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
+ /* When a packet is received for an RQ with no receive WQEs, the
+ * packet processing is delayed.
+ */
+ IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};
enum ib_wq_type {
@@ -1574,6 +1620,7 @@ struct ib_wq {
enum ib_wq_flags {
IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
+ IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
};
struct ib_wq_init_attr {
@@ -1683,6 +1730,7 @@ struct ib_qp {
enum ib_qp_type qp_type;
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
+ u8 port;
};
struct ib_mr {
@@ -2288,6 +2336,8 @@ struct ib_device {
struct rdmacg_device cg_device;
#endif
+ u32 index;
+
/**
* The following mandatory functions are used only at device
* registration. Keep functions such as these at the end of this
@@ -2295,7 +2345,11 @@ struct ib_device {
* in fast paths.
*/
int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
- void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
+ void (*get_dev_fw_str)(struct ib_device *, char *str);
+ const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
+ int comp_vector);
+
+ struct uverbs_root_spec *specs_root;
};
struct ib_client {
@@ -2331,7 +2385,7 @@ struct ib_client {
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);
-void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
+void ib_get_device_fw_str(struct ib_device *device, char *str);
int ib_register_device(struct ib_device *device,
int (*port_callback)(struct ib_device *,
@@ -2395,8 +2449,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
enum ib_qp_type type, enum ib_qp_attr_mask mask,
enum rdma_link_layer ll);
-int ib_register_event_handler (struct ib_event_handler *event_handler);
-int ib_unregister_event_handler(struct ib_event_handler *event_handler);
+void ib_register_event_handler(struct ib_event_handler *event_handler);
+void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);
int ib_query_port(struct ib_device *device,
@@ -2948,6 +3002,22 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr);
/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to the user's input/output buffer information
+ *
+ * It returns 0 on success or an appropriate error code on failure.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct ib_udata *udata);
+
+/**
* ib_modify_qp - Modifies the attributes for the specified QP and then
* transitions the QP to the given state.
* @qp: The QP to modify.
@@ -3539,6 +3609,7 @@ void ib_drain_qp(struct ib_qp *qp);
int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr);
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
@@ -3592,6 +3663,20 @@ static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
return 0;
}
+static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
+ bool make_grd)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ attr->opa.make_grd = make_grd;
+}
+
+static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ return attr->opa.make_grd;
+ return false;
+}
+
static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
attr->port_num = port_num;
@@ -3690,4 +3775,52 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
else
return RDMA_AH_ATTR_TYPE_IB;
}
+
+/**
+ * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
+ * In the current implementation the only way to get
+ * the 32bit lid is from other sources for OPA.
+ * For IB, lids will always be 16bits so cast the
+ * value accordingly.
+ *
+ * @lid: A 32bit LID
+ */
+static inline u16 ib_lid_cpu16(u32 lid)
+{
+ WARN_ON_ONCE(lid & 0xFFFF0000);
+ return (u16)lid;
+}
+
+/**
+ * ib_lid_be16 - Return lid in 16bit BE encoding.
+ *
+ * @lid: A 32bit LID
+ */
+static inline __be16 ib_lid_be16(u32 lid)
+{
+ WARN_ON_ONCE(lid & 0xFFFF0000);
+ return cpu_to_be16((u16)lid);
+}
+
+/**
+ * ib_get_vector_affinity - Get the affinity mappings of a given completion
+ * vector
+ * @device: the rdma device
+ * @comp_vector: index of completion vector
+ *
+ * Returns NULL if the driver doesn't implement get_vector_affinity or the
+ * completion vector index is out of range, otherwise the cpu map assigned
+ * to that completion vector.
+ */
+static inline const struct cpumask *
+ib_get_vector_affinity(struct ib_device *device, int comp_vector)
+{
+ if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
+ !device->get_vector_affinity)
+ return NULL;
+
+ return device->get_vector_affinity(device, comp_vector);
+}
+
#endif /* IB_VERBS_H */
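ib_get_vector_affinity() lets a ULP align its own queues with the CPUs that service a completion vector. A minimal sketch, assuming a hypothetical helper that picks one CPU per vector and treats NULL (no driver support, or an out-of-range index) as "no preference":

#include <rdma/ib_verbs.h>

static int example_pick_cpu(struct ib_device *dev, int comp_vector)
{
	const struct cpumask *mask;

	mask = ib_get_vector_affinity(dev, comp_vector);
	if (!mask)
		return -1;	/* no preference, let the caller decide */

	return cpumask_first(mask);
}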
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index eace28f1555d..e6e90f18e6d5 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -48,8 +48,21 @@
#ifndef OPA_ADDR_H
#define OPA_ADDR_H
+#include <rdma/opa_smi.h>
+
#define OPA_SPECIAL_OUI (0x00066AULL)
#define OPA_MAKE_ID(x) (cpu_to_be64(OPA_SPECIAL_OUI << 40 | (x)))
+#define OPA_TO_IB_UCAST_LID(x) (((x) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) \
+ ? 0 : x)
+#define OPA_GID_INDEX 0x1
+/**
+ * 0xF8 - 4 bits of multicast range and 1 bit for collective range
+ * Example: For 24 bit LID space,
+ * Multicast range: 0xF00000 to 0xF7FFFF
+ * Collective range: 0xF80000 to 0xFFFFFE
+ */
+#define OPA_MCAST_NR 0x4 /* Number of top bits set */
+#define OPA_COLLECTIVE_NR 0x1 /* Number of bits after MCAST_NR */
/**
* ib_is_opa_gid: Returns true if the top 24 bits of the gid
@@ -59,7 +72,7 @@
*
* @gid: The Global identifier
*/
-static inline bool ib_is_opa_gid(union ib_gid *gid)
+static inline bool ib_is_opa_gid(const union ib_gid *gid)
{
return ((be64_to_cpu(gid->global.interface_id) >> 40) ==
OPA_SPECIAL_OUI);
@@ -72,8 +85,33 @@ static inline bool ib_is_opa_gid(union ib_gid *gid)
*
* @gid: The Global identifier
*/
-static inline u32 opa_get_lid_from_gid(union ib_gid *gid)
+static inline u32 opa_get_lid_from_gid(const union ib_gid *gid)
{
return be64_to_cpu(gid->global.interface_id) & 0xFFFFFFFF;
}
+
+/**
+ * opa_is_extended_lid: Returns true if dlid or slid are
+ * extended.
+ *
+ * @dlid: The DLID
+ * @slid: The SLID
+ */
+static inline bool opa_is_extended_lid(u32 dlid, u32 slid)
+{
+ if ((be32_to_cpu(dlid) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
+ (be32_to_cpu(slid) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE)))
+ return true;
+ else
+ return false;
+}
+
+/* Get multicast lid base */
+static inline u32 opa_get_mcast_base(u32 nr_top_bits)
+{
+ return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
+}
+
#endif /* OPA_ADDR_H */
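For OPA_MCAST_NR = 4, opa_get_mcast_base() shifts the permissive LID and yields 0xF0000000, so the top four bits of a 32-bit LID select the multicast/collective range, mirroring the 24-bit example in the comment above. A small sketch (hypothetical helper) of a classification check:

#include <rdma/opa_addr.h>

static bool example_lid_is_mcast(u32 lid)
{
	/* Everything from the multicast base up to, but excluding, the
	 * permissive LID is multicast or collective.
	 */
	return lid >= opa_get_mcast_base(OPA_MCAST_NR) &&
	       lid != be32_to_cpu(OPA_LID_PERMISSIVE);
}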
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index 39d6890616a6..0c07a70bd7f6 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -54,9 +54,6 @@
#include <rdma/ib_verbs.h>
-/* VNIC uses 16B header format */
-#define OPA_VNIC_L2_TYPE 0x2
-
/* 16 header bytes + 2 reserved bytes */
#define OPA_VNIC_L2_HDR_LEN (16 + 2)
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 348c102cb5f6..2d878596b1e0 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -5,29 +5,43 @@
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
-struct ibnl_client_cbs {
+struct rdma_nl_cbs {
+ int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
- struct module *module;
+ u8 flags;
};
+enum rdma_nl_flags {
+ /* Require CAP_NET_ADMIN */
+ RDMA_NL_ADMIN_PERM = 1 << 0,
+};
+
+/* Define this module as providing netlink services for NETLINK_RDMA, with
+ * index _index. Since the client indexes were setup in a uapi header as an
+ * enum and we do not want to change that, the user must supply the expanded
+ * constant as well and the compiler checks they are the same.
+ */
+#define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \
+ static inline void __chk_##_index(void) \
+ { \
+ BUILD_BUG_ON(_index != _val); \
+ } \
+ MODULE_ALIAS("rdma-netlink-subsys-" __stringify(_val))
+
/**
- * Add a a client to the list of IB netlink exporters.
+ * Register client in RDMA netlink.
* @index: Index of the added client
- * @nops: Number of supported ops by the added client.
* @cb_table: A table for op->callback
- *
- * Returns 0 on success or a negative error code.
*/
-int ibnl_add_client(int index, int nops,
- const struct ibnl_client_cbs cb_table[]);
+void rdma_nl_register(unsigned int index,
+ const struct rdma_nl_cbs cb_table[]);
/**
* Remove a client from IB netlink.
* @index: Index of the removed IB client.
- *
- * Returns 0 on success or a negative error code.
*/
-int ibnl_remove_client(int index);
+void rdma_nl_unregister(unsigned int index);
/**
* Put a new message in a supplied skb.
@@ -56,22 +70,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
/**
* Send the supplied skb to a specific userspace PID.
* @skb: The netlink skb
- * @nlh: Header of the netlink message to send
* @pid: Userspace netlink process ID
* Returns 0 on success or a negative error code.
*/
-int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- __u32 pid);
+int rdma_nl_unicast(struct sk_buff *skb, u32 pid);
+
+/**
+ * Send, with wait/1 retry, the supplied skb to a specific userspace PID.
+ * @skb: The netlink skb
+ * @pid: Userspace netlink process ID
+ * Returns 0 on success or a negative error code.
+ */
+int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid);
/**
* Send the supplied skb to a netlink group.
* @skb: The netlink skb
- * @nlh: Header of the netlink message to send
* @group: Netlink group ID
* @flags: allocation flags
* Returns 0 on success or a negative error code.
*/
-int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- unsigned int group, gfp_t flags);
+int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 if there are listeners, or a negative value if there are none.
+ */
+int rdma_nl_chk_listeners(unsigned int group);
#endif /* _RDMA_NETLINK_H */
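The rdma_nl_register()/rdma_nl_unregister() pair replaces the per-op registration of ibnl_add_client(). A minimal sketch of a client module; the table contents and handler are hypothetical, and RDMA_NL_LS merely stands in for whichever client index the module really owns.

#include <linux/module.h>
#include <rdma/rdma_netlink.h>

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	return 0;
}

static const struct rdma_nl_cbs example_cb_table[] = {
	[0] = {
		.doit	= example_doit,
		.flags	= RDMA_NL_ADMIN_PERM,	/* require CAP_NET_ADMIN */
	},
};

static int __init example_init(void)
{
	rdma_nl_register(RDMA_NL_LS, example_cb_table);
	return 0;
}

static void __exit example_exit(void)
{
	rdma_nl_unregister(RDMA_NL_LS);
}

module_init(example_init);
module_exit(example_exit);
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);	/* 4 == RDMA_NL_LS in the uapi enum */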
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4878aaf7bdff..1ba84a78f1c5 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -57,11 +57,21 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>
#define RVT_MAX_PKEY_VALUES 16
+#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
+#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
+#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */
+
+struct trap_list {
+ u32 list_len;
+ struct list_head list;
+};
+
struct rvt_ibport {
struct rvt_qp __rcu *qp[2];
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
@@ -75,12 +85,13 @@ struct rvt_ibport {
__be64 mkey;
u64 tid;
u32 port_cap_flags;
+ u16 port_cap3_flags;
u32 pma_sample_start;
u32 pma_sample_interval;
__be16 pma_counter_select[5];
u16 pma_tag;
u16 mkey_lease_period;
- u16 sm_lid;
+ u32 sm_lid;
u8 sm_sl;
u8 mkeyprot;
u8 subnet_timeout;
@@ -127,6 +138,13 @@ struct rvt_ibport {
u16 *pkey_table;
struct rvt_ah *sm_ah;
+
+ /*
+ * Keep a list of traps that have not been repressed. They will be
+ * resent based on trap_timer.
+ */
+ struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
+ struct timer_list trap_timer;
};
#define RVT_CQN_MAX 16 /* maximum length of cq name */
@@ -229,8 +247,7 @@ struct rvt_driver_provided {
* ERR_PTR(err). The driver is free to return NULL or a valid
* pointer.
*/
- void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+ void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
* Free the driver's private qp structure.
@@ -319,7 +336,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp);
+ enum ib_qp_type type, u8 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
@@ -515,7 +532,8 @@ int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
- struct rvt_sge *isge, struct ib_sge *sge, int acc);
+ struct rvt_sge *isge, struct rvt_sge *last_sge,
+ struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
u16 lid);
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index f418bd5571a5..72a3856d4057 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -191,4 +191,7 @@ static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
}
}
+bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
+bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);
+
#endif /* DEF_RDMAVT_INCMRH */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index be6472e5b06b..0eed3d8752fa 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -277,7 +277,6 @@ struct rvt_qp {
unsigned long timeout_jiffies; /* computed from timeout */
- enum ib_mtu path_mtu;
int srate_mbps; /* s_srate (below) converted to Mbit/s */
pid_t pid; /* pid for user mode QPs */
u32 remote_qpn;
@@ -396,7 +395,7 @@ struct rvt_srq {
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
-#define RVT_QPN_MASK 0xFFFFFF
+#define RVT_QPN_MASK IB_QPN_MASK
/*
* QPN-map pages start out as NULL, they get allocated upon
@@ -647,6 +646,20 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
return len >> qp->log_pmtu;
}
+/**
+ * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
+ * @timeout - timeout input(0 - 31).
+ *
+ * Return a timeout value in jiffies.
+ */
+static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
+{
+ if (timeout > 31)
+ timeout = 31;
+
+ return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
+}
+
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
@@ -660,4 +673,34 @@ void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
+/**
+ * struct rvt_qp_iter - the iterator for QPs
+ * @qp - the current QP
+ *
+ * This structure defines the current iterator
+ * state for sequenced access to all QPs relative
+ * to an rvt_dev_info.
+ */
+struct rvt_qp_iter {
+ struct rvt_qp *qp;
+ /* private: backpointer */
+ struct rvt_dev_info *rdi;
+ /* private: callback routine */
+ void (*cb)(struct rvt_qp *qp, u64 v);
+ /* private: for arg to callback routine */
+ u64 v;
+ /* private: number of SMI,GSI QPs for device */
+ int specials;
+ /* private: current iterator index */
+ int n;
+};
+
+struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v));
+int rvt_qp_iter_next(struct rvt_qp_iter *iter);
+void rvt_qp_iter(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v));
+void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif /* DEF_RDMAVT_INCQP_H */
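rvt_qp_iter() gives drivers a sequenced walk over every QP of an rvt device, and rvt_timeout_to_jiffies() centralizes the IBTA local-ack-timeout encoding (4.096 usec << timeout, capped at 31). An illustrative debug walk; the callback and its output are hypothetical, and it assumes the rvt_qp "timeout" field holds the encoded value.

#include <rdma/rdmavt_qp.h>

static void example_dump_qp(struct rvt_qp *qp, u64 v)
{
	pr_info("remote qpn 0x%x local ack timeout ~%u ms\n",
		qp->remote_qpn,
		jiffies_to_msecs(rvt_timeout_to_jiffies(qp->timeout)));
}

static void example_dump_all(struct rvt_dev_info *rdi)
{
	/* Visits every QP (SMI/GSI specials included), passing the opaque
	 * value 0 through to the callback.
	 */
	rvt_qp_iter(rdi, 0, example_dump_qp);
}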
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
new file mode 100644
index 000000000000..6da44079aa58
--- /dev/null
+++ b/include/rdma/uverbs_ioctl.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_IOCTL_
+#define _UVERBS_IOCTL_
+
+#include <rdma/uverbs_types.h>
+#include <linux/uaccess.h>
+#include <rdma/rdma_user_ioctl.h>
+#include <rdma/ib_user_ioctl_verbs.h>
+
+/*
+ * =======================================
+ * Verbs action specifications
+ * =======================================
+ */
+
+enum uverbs_attr_type {
+ UVERBS_ATTR_TYPE_NA,
+ UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_TYPE_PTR_OUT,
+ UVERBS_ATTR_TYPE_IDR,
+ UVERBS_ATTR_TYPE_FD,
+};
+
+enum uverbs_obj_access {
+ UVERBS_ACCESS_READ,
+ UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_NEW,
+ UVERBS_ACCESS_DESTROY
+};
+
+enum {
+ UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0,
+ /* Support extending attributes by length */
+ UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1,
+};
+
+struct uverbs_attr_spec {
+ enum uverbs_attr_type type;
+ union {
+ u16 len;
+ struct {
+ /*
+ * higher bits mean the namespace and lower bits mean
+ * the type id within the namespace.
+ */
+ u16 obj_type;
+ u8 access;
+ } obj;
+ };
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+};
+
+struct uverbs_attr_spec_hash {
+ size_t num_attrs;
+ unsigned long *mandatory_attrs_bitmask;
+ struct uverbs_attr_spec attrs[0];
+};
+
+struct uverbs_attr_bundle;
+struct ib_uverbs_file;
+
+enum {
+ /*
+ * Action marked with this flag creates a context (or root for all
+ * objects).
+ */
+ UVERBS_ACTION_FLAG_CREATE_ROOT = 1U << 0,
+};
+
+struct uverbs_method_spec {
+ /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */
+ u32 flags;
+ size_t num_buckets;
+ size_t num_child_attrs;
+ int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+ struct uverbs_attr_spec_hash *attr_buckets[0];
+};
+
+struct uverbs_method_spec_hash {
+ size_t num_methods;
+ struct uverbs_method_spec *methods[0];
+};
+
+struct uverbs_object_spec {
+ const struct uverbs_obj_type *type_attrs;
+ size_t num_buckets;
+ struct uverbs_method_spec_hash *method_buckets[0];
+};
+
+struct uverbs_object_spec_hash {
+ size_t num_objects;
+ struct uverbs_object_spec *objects[0];
+};
+
+struct uverbs_root_spec {
+ size_t num_buckets;
+ struct uverbs_object_spec_hash *object_buckets[0];
+};
+
+/*
+ * =======================================
+ * Verbs definitions
+ * =======================================
+ */
+
+struct uverbs_attr_def {
+ u16 id;
+ struct uverbs_attr_spec attr;
+};
+
+struct uverbs_method_def {
+ u16 id;
+ /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */
+ u32 flags;
+ size_t num_attrs;
+ const struct uverbs_attr_def * const (*attrs)[];
+ int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *ctx);
+};
+
+struct uverbs_object_def {
+ u16 id;
+ const struct uverbs_obj_type *type_attrs;
+ size_t num_methods;
+ const struct uverbs_method_def * const (*methods)[];
+};
+
+struct uverbs_object_tree_def {
+ size_t num_objects;
+ const struct uverbs_object_def * const (*objects)[];
+};
+
+#define UA_FLAGS(_flags) .flags = _flags
+#define __UVERBS_ATTR0(_id, _len, _type, ...) \
+ ((const struct uverbs_attr_def) \
+ {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } })
+#define __UVERBS_ATTR1(_id, _len, _type, _flags) \
+ ((const struct uverbs_attr_def) \
+ {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } })
+#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \
+ __UVERBS_ATTR##_n(_id, _len, _type, _flags)
+/*
+ * In new compiler, UVERBS_ATTR could be simplified by declaring it as
+ * [_id] = {.type = _type, .len = _len, ##__VA_ARGS__}
+ * But since we support older compilers too, we need the more complex code.
+ */
+#define UVERBS_ATTR(_id, _len, _type, ...) \
+ __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0)
+#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \
+ UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__)
+/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */
+#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \
+ UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \
+ UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__)
+#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \
+ UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+
+/*
+ * In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring
+ * it as
+ * {.id = _id, \
+ * .attr {.type = __obj_class, \
+ * .obj = {.obj_type = _idr_type, \
+ * .access = _access \
+ * }, ##__VA_ARGS__ } }
+ * But since we support older compilers too, we need the more complex code.
+ */
+#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\
+ ((const struct uverbs_attr_def) \
+ {.id = _id, \
+ .attr = {.type = _obj_class, \
+ {.obj = {.obj_type = _obj_type, .access = _access } },\
+ .flags = 0} })
+#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\
+ ((const struct uverbs_attr_def) \
+ {.id = _id, \
+ .attr = {.type = _obj_class, \
+ {.obj = {.obj_type = _obj_type, .access = _access} }, \
+ _flags} })
+#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \
+ _n, ...) \
+ ___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags)
+#define __UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, ...) \
+ ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, \
+ ##__VA_ARGS__, 1, 0)
+#define UVERBS_ATTR_IDR(_id, _idr_type, _access, ...) \
+ __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_IDR, _idr_type, _access,\
+ ##__VA_ARGS__)
+#define UVERBS_ATTR_FD(_id, _fd_type, _access, ...) \
+ __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_FD, _fd_type, \
+ (_access) + BUILD_BUG_ON_ZERO( \
+ (_access) != UVERBS_ACCESS_NEW && \
+ (_access) != UVERBS_ACCESS_READ), \
+ ##__VA_ARGS__)
+#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \
+ const struct uverbs_attr_def _name = __VA_ARGS__
+
+#define _UVERBS_METHOD_ATTRS_SZ(...) \
+ (sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\
+ sizeof(const struct uverbs_attr_def *))
+#define _UVERBS_METHOD(_id, _handler, _flags, ...) \
+ ((const struct uverbs_method_def) { \
+ .id = _id, \
+ .flags = _flags, \
+ .handler = _handler, \
+ .num_attrs = _UVERBS_METHOD_ATTRS_SZ(__VA_ARGS__), \
+ .attrs = &(const struct uverbs_attr_def * const []){__VA_ARGS__} })
+#define DECLARE_UVERBS_METHOD(_name, _id, _handler, ...) \
+ const struct uverbs_method_def _name = \
+ _UVERBS_METHOD(_id, _handler, 0, ##__VA_ARGS__)
+#define DECLARE_UVERBS_CTX_METHOD(_name, _id, _handler, _flags, ...) \
+ const struct uverbs_method_def _name = \
+ _UVERBS_METHOD(_id, _handler, \
+ UVERBS_ACTION_FLAG_CREATE_ROOT, \
+ ##__VA_ARGS__)
+#define _UVERBS_OBJECT_METHODS_SZ(...) \
+ (sizeof((const struct uverbs_method_def * const []){__VA_ARGS__}) / \
+ sizeof(const struct uverbs_method_def *))
+#define _UVERBS_OBJECT(_id, _type_attrs, ...) \
+ ((const struct uverbs_object_def) { \
+ .id = _id, \
+ .type_attrs = _type_attrs, \
+ .num_methods = _UVERBS_OBJECT_METHODS_SZ(__VA_ARGS__), \
+ .methods = &(const struct uverbs_method_def * const []){__VA_ARGS__} })
+#define DECLARE_UVERBS_OBJECT(_name, _id, _type_attrs, ...) \
+ const struct uverbs_object_def _name = \
+ _UVERBS_OBJECT(_id, _type_attrs, ##__VA_ARGS__)
+#define _UVERBS_TREE_OBJECTS_SZ(...) \
+ (sizeof((const struct uverbs_object_def * const []){__VA_ARGS__}) / \
+ sizeof(const struct uverbs_object_def *))
+#define _UVERBS_OBJECT_TREE(...) \
+ ((const struct uverbs_object_tree_def) { \
+ .num_objects = _UVERBS_TREE_OBJECTS_SZ(__VA_ARGS__), \
+ .objects = &(const struct uverbs_object_def * const []){__VA_ARGS__} })
+#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) \
+ const struct uverbs_object_tree_def _name = \
+ _UVERBS_OBJECT_TREE(__VA_ARGS__)
+
+/* =================================================
+ * Parsing infrastructure
+ * =================================================
+ */
+
+struct uverbs_ptr_attr {
+ union {
+ u64 data;
+ void __user *ptr;
+ };
+ u16 len;
+ /* Combination of bits from enum UVERBS_ATTR_F_XXXX */
+ u16 flags;
+};
+
+struct uverbs_obj_attr {
+ /* pointer to the kernel descriptor -> type, access, etc */
+ const struct uverbs_obj_type *type;
+ struct ib_uobject *uobject;
+ /* fd or id in idr of this object */
+ int id;
+};
+
+struct uverbs_attr {
+ /*
+ * pointer to the user-space given attribute, in order to write the
+ * new uobject's id or update flags.
+ */
+ struct ib_uverbs_attr __user *uattr;
+ union {
+ struct uverbs_ptr_attr ptr_attr;
+ struct uverbs_obj_attr obj_attr;
+ };
+};
+
+struct uverbs_attr_bundle_hash {
+ /* if bit i is set, it means attrs[i] contains valid information */
+ unsigned long *valid_bitmap;
+ size_t num_attrs;
+ /*
+ * arrays of attributes, each element corresponds to the specification
+ * of the attribute in the same index.
+ */
+ struct uverbs_attr *attrs;
+};
+
+struct uverbs_attr_bundle {
+ size_t num_buckets;
+ struct uverbs_attr_bundle_hash hash[];
+};
+
+static inline bool uverbs_attr_is_valid_in_hash(const struct uverbs_attr_bundle_hash *attrs_hash,
+ unsigned int idx)
+{
+ return test_bit(idx, attrs_hash->valid_bitmap);
+}
+
+static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_bundle,
+ unsigned int idx)
+{
+ u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT;
+
+ if (attrs_bundle->num_buckets <= idx_bucket)
+ return false;
+
+ return uverbs_attr_is_valid_in_hash(&attrs_bundle->hash[idx_bucket],
+ idx & ~UVERBS_ID_NS_MASK);
+}
+
+static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT;
+
+ if (!uverbs_attr_is_valid(attrs_bundle, idx))
+ return ERR_PTR(-ENOENT);
+
+ return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK];
+}
+
+static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, const void *from)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+ u16 flags;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
+ return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) &&
+ !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT;
+}
+
+static inline int _uverbs_copy_from(void *to, size_t to_size,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data))
+ memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
+ else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define uverbs_copy_from(to, attrs_bundle, idx) \
+ _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx)
+
+/* =================================================
+ * Definitions -> Specs infrastructure
+ * =================================================
+ */
+
+/*
+ * uverbs_alloc_spec_tree - Merges different common and driver-specific features
+ * into one parsing tree against which every uverbs command is parsed.
+ *
+ * @num_trees: Number of trees in the array @trees.
+ * @trees: Array of pointers to tree root definitions to merge. Each such tree
+ * possibly contains objects, methods and attributes definitions.
+ *
+ * Returns:
+ * uverbs_root_spec *: The root of the merged parsing tree.
+ * On error, we return an error code. Error is checked via IS_ERR.
+ *
+ * The following merges could take place:
+ * a. Two trees representing the same method with different handlers
+ * -> We take the handler of the tree whose handler is not NULL
+ * and whose index in the trees array is greater. The rationale is that
+ * developers are expected to first merge common trees and then merge
+ * trees that specialize the behaviour.
+ * b. Two trees representing the same object with different
+ * type_attrs (struct uverbs_obj_type):
+ * -> We take the type_attrs of the tree whose type_attrs is not NULL
+ * and whose index in the trees array is greater. This could be used
+ * in order to override the free function, allocation size, etc.
+ * c. Two trees representing the same method attribute (same id but possibly
+ * different attributes):
+ * -> ERROR (-ENOENT), we believe that's not the programmer's intent.
+ *
+ * An object without any methods is considered invalid and will abort the
+ * function with -ENOENT error.
+ */
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
+ const struct uverbs_object_tree_def **trees);
+void uverbs_free_spec_tree(struct uverbs_root_spec *root);
+#else
+static inline struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
+ const struct uverbs_object_tree_def **trees)
+{
+ return NULL;
+}
+
+static inline void uverbs_free_spec_tree(struct uverbs_root_spec *root)
+{
+}
+#endif
+
+#endif
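A minimal sketch of how a handler written against this interface moves data through the attribute bundle; the attribute ids, the method id and the handler itself are hypothetical.

#include <rdma/uverbs_ioctl.h>

enum { EXAMPLE_ATTR_IN, EXAMPLE_ATTR_OUT };

static int example_handler(struct ib_device *ib_dev,
			   struct ib_uverbs_file *ufile,
			   struct uverbs_attr_bundle *attrs)
{
	u64 in, out;
	int ret;

	/* Returns -ENOENT if userspace did not supply the attribute. */
	ret = uverbs_copy_from(&in, attrs, EXAMPLE_ATTR_IN);
	if (ret)
		return ret;

	out = in + 1;
	/* Copies the result back and marks the output attribute valid. */
	return uverbs_copy_to(attrs, EXAMPLE_ATTR_OUT, &out);
}

DECLARE_UVERBS_METHOD(example_method, 0, example_handler,
	&UVERBS_ATTR_PTR_IN(EXAMPLE_ATTR_IN, u64,
			    UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_OUT(EXAMPLE_ATTR_OUT, u64));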
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 7771ce966952..5f8e20bbd67c 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -34,19 +34,35 @@
#define _UVERBS_STD_TYPES__
#include <rdma/uverbs_types.h>
-
-extern const struct uverbs_obj_fd_type uverbs_type_attrs_comp_channel;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_cq;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_qp;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_rwq_ind_table;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_wq;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_srq;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_ah;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_flow;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_mr;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_mw;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_pd;
-extern const struct uverbs_obj_idr_type uverbs_type_attrs_xrcd;
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/ib_user_ioctl_verbs.h>
+
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+extern const struct uverbs_object_def uverbs_object_comp_channel;
+extern const struct uverbs_object_def uverbs_object_cq;
+extern const struct uverbs_object_def uverbs_object_qp;
+extern const struct uverbs_object_def uverbs_object_rwq_ind_table;
+extern const struct uverbs_object_def uverbs_object_wq;
+extern const struct uverbs_object_def uverbs_object_srq;
+extern const struct uverbs_object_def uverbs_object_ah;
+extern const struct uverbs_object_def uverbs_object_flow;
+extern const struct uverbs_object_def uverbs_object_mr;
+extern const struct uverbs_object_def uverbs_object_mw;
+extern const struct uverbs_object_def uverbs_object_pd;
+extern const struct uverbs_object_def uverbs_object_xrcd;
+extern const struct uverbs_object_def uverbs_object_device;
+
+extern const struct uverbs_object_tree_def uverbs_default_objects;
+static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
+{
+ return &uverbs_default_objects;
+}
+#else
+static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
+{
+ return NULL;
+}
+#endif
static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
bool write,
@@ -56,22 +72,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
return rdma_lookup_get_uobject(type, ucontext, id, write);
}
-#define uobj_get_type(_type) uverbs_type_attrs_##_type.type
+#define uobj_get_type(_object) uverbs_object_##_object.type_attrs
#define uobj_get_read(_type, _id, _ucontext) \
- __uobj_get(&(_type), false, _ucontext, _id)
+ __uobj_get(_type, false, _ucontext, _id)
-#define uobj_get_obj_read(_type, _id, _ucontext) \
+#define uobj_get_obj_read(_object, _id, _ucontext) \
({ \
- struct ib_uobject *uobj = \
- __uobj_get(&uobj_get_type(_type), \
+ struct ib_uobject *__uobj = \
+ __uobj_get(uverbs_object_##_object.type_attrs, \
false, _ucontext, _id); \
\
- (struct ib_##_type *)(IS_ERR(uobj) ? NULL : uobj->object); \
+ (struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\
})
#define uobj_get_write(_type, _id, _ucontext) \
- __uobj_get(&(_type), true, _ucontext, _id)
+ __uobj_get(_type, true, _ucontext, _id)
static inline void uobj_put_read(struct ib_uobject *uobj)
{
@@ -108,7 +124,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type
}
#define uobj_alloc(_type, ucontext) \
- __uobj_alloc(&(_type), ucontext)
+ __uobj_alloc(_type, ucontext)
#endif
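With the rename, the lookup macros take the bare object name rather than a type variable. A sketch of looking up a CQ by user handle; the wrapper function is hypothetical.

#include <rdma/uverbs_std_types.h>

static struct ib_cq *example_lookup_cq(struct ib_ucontext *ucontext,
				       u32 handle)
{
	/* Expands to uverbs_object_cq.type_attrs internally and yields the
	 * embedded ib_cq, or NULL if the handle does not resolve; the
	 * reference taken here must be dropped again once the caller is
	 * done with the object.
	 */
	return uobj_get_obj_read(cq, handle, ucontext);
}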
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index 351ea185df44..cc04ec65588d 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -129,6 +129,7 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj);
int rdma_alloc_commit_uobject(struct ib_uobject *uobj);
+int rdma_explicit_destroy(struct ib_uobject *uobject);
struct uverbs_obj_fd_type {
/*
@@ -151,22 +152,30 @@ extern const struct uverbs_obj_type_class uverbs_fd_class;
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_size, _order) \
- { \
- .destroy_order = _order, \
- .type_class = &uverbs_fd_class, \
- .obj_size = (_size) + \
- UVERBS_BUILD_BUG_ON((_size) < \
- sizeof(struct ib_uobject_file)),\
- }
-#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order) \
- { \
+#define UVERBS_TYPE_ALLOC_FD(_order, _obj_size, _context_closed, _fops, _name, _flags)\
+ ((&((const struct uverbs_obj_fd_type) \
+ {.type = { \
+ .destroy_order = _order, \
+ .type_class = &uverbs_fd_class, \
+ .obj_size = (_obj_size) + \
+ UVERBS_BUILD_BUG_ON((_obj_size) < sizeof(struct ib_uobject_file)), \
+ }, \
+ .context_closed = _context_closed, \
+ .fops = _fops, \
+ .name = _name, \
+ .flags = _flags}))->type)
+#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order, _destroy_object) \
+ ((&((const struct uverbs_obj_idr_type) \
+ {.type = { \
.destroy_order = _order, \
.type_class = &uverbs_idr_class, \
.obj_size = (_size) + \
- UVERBS_BUILD_BUG_ON((_size) < \
- sizeof(struct ib_uobject)), \
- }
-#define UVERBS_TYPE_ALLOC_IDR(_order) \
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order)
+ UVERBS_BUILD_BUG_ON((_size) < \
+ sizeof(struct ib_uobject)) \
+ }, \
+ .destroy_object = _destroy_object,}))->type)
+#define UVERBS_TYPE_ALLOC_IDR(_order, _destroy_object) \
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order, \
+ _destroy_object)
+
#endif
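The allocation macros now carry the per-type destroy callback (and, for fd types, the fops, name and flags) instead of leaving them to separate fields. A sketch of declaring an idr-backed type with the reworked macro; the callback is hypothetical and its prototype follows struct uverbs_obj_idr_type as defined earlier in this header.

#include <rdma/uverbs_types.h>

static int example_destroy_object(struct ib_uobject *uobject,
				  enum rdma_remove_reason why)
{
	/* Tear down the hardware object referenced by uobject->object. */
	return 0;
}

/* destroy_order 0: released before higher-order types on context teardown */
static const struct uverbs_obj_type *example_type_attrs =
	&UVERBS_TYPE_ALLOC_IDR(0, example_destroy_object);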
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
deleted file mode 100644
index a2dcfb850b9f..000000000000
--- a/include/rxrpc/packet.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/* packet.h: Rx packet layout and definitions
- *
- * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_PACKET_H
-#define _LINUX_RXRPC_PACKET_H
-
-typedef u32 rxrpc_seq_t; /* Rx message sequence number */
-typedef u32 rxrpc_serial_t; /* Rx message serial number */
-typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */
-typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
-
-/*****************************************************************************/
-/*
- * on-the-wire Rx packet header
- * - all multibyte fields should be in network byte order
- */
-struct rxrpc_wire_header {
- __be32 epoch; /* client boot timestamp */
-#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
-
- __be32 cid; /* connection and channel ID */
-#define RXRPC_MAXCALLS 4 /* max active calls per conn */
-#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */
-#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */
-#define RXRPC_CIDSHIFT ilog2(RXRPC_MAXCALLS) /* shift for connection ID */
-#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
-
- __be32 callNumber; /* call ID (0 for connection-level packets) */
- __be32 seq; /* sequence number of pkt in call stream */
- __be32 serial; /* serial number of pkt sent to network */
-
- uint8_t type; /* packet type */
-#define RXRPC_PACKET_TYPE_DATA 1 /* data */
-#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */
-#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */
-#define RXRPC_PACKET_TYPE_ABORT 4 /* call/connection abort */
-#define RXRPC_PACKET_TYPE_ACKALL 5 /* ACK all outstanding packets on call */
-#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */
-#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection secutity response (CLNT->SRVR) */
-#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */
-#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
-#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
-
- uint8_t flags; /* packet flags */
-#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
-#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */
-#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */
-#define RXRPC_MORE_PACKETS 0x08 /* more packets to come */
-#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */
-#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */
-
- uint8_t userStatus; /* app-layer defined status */
-#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */
-
- uint8_t securityIndex; /* security protocol ID */
- union {
- __be16 _rsvd; /* reserved */
- __be16 cksum; /* kerberos security checksum */
- };
- __be16 serviceId; /* service ID */
-
-} __packed;
-
-#define RXRPC_SUPPORTED_PACKET_TYPES ( \
- (1 << RXRPC_PACKET_TYPE_DATA) | \
- (1 << RXRPC_PACKET_TYPE_ACK) | \
- (1 << RXRPC_PACKET_TYPE_BUSY) | \
- (1 << RXRPC_PACKET_TYPE_ABORT) | \
- (1 << RXRPC_PACKET_TYPE_ACKALL) | \
- (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
- (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
- /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
- (1 << RXRPC_PACKET_TYPE_VERSION))
-
-/*****************************************************************************/
-/*
- * jumbo packet secondary header
- * - can be mapped to read header by:
- * - new_serial = serial + 1
- * - new_seq = seq + 1
- * - new_flags = j_flags
- * - new__rsvd = j__rsvd
- * - duplicating all other fields
- */
-struct rxrpc_jumbo_header {
- uint8_t flags; /* packet flags (as per rxrpc_header) */
- uint8_t pad;
- union {
- __be16 _rsvd; /* reserved */
- __be16 cksum; /* kerberos security checksum */
- };
-};
-
-#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
-#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
-
-/*****************************************************************************/
-/*
- * on-the-wire Rx ACK packet data payload
- * - all multibyte fields should be in network byte order
- */
-struct rxrpc_ackpacket {
- __be16 bufferSpace; /* number of packet buffers available */
- __be16 maxSkew; /* diff between serno being ACK'd and highest serial no
- * received */
- __be32 firstPacket; /* sequence no of first ACK'd packet in attached list */
- __be32 previousPacket; /* sequence no of previous packet received */
- __be32 serial; /* serial no of packet that prompted this ACK */
-
- uint8_t reason; /* reason for ACK */
-#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */
-#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */
-#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */
-#define RXRPC_ACK_EXCEEDS_WINDOW 4 /* packet received beyond end of ACK window */
-#define RXRPC_ACK_NOSPACE 5 /* packet discarded due to lack of buffer space */
-#define RXRPC_ACK_PING 6 /* keep alive ACK */
-#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
-#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
-#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
-#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
-
- uint8_t nAcks; /* number of ACKs */
-#define RXRPC_MAXACKS 255
-
- uint8_t acks[0]; /* list of ACK/NAKs */
-#define RXRPC_ACK_TYPE_NACK 0
-#define RXRPC_ACK_TYPE_ACK 1
-
-} __packed;
-
-/* Some ACKs refer to specific packets and some are general and can be updated. */
-#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
- (1 << RXRPC_ACK_PING_RESPONSE) | \
- (1 << RXRPC_ACK_DELAY) | \
- (1 << RXRPC_ACK_IDLE))
-
-
-/*
- * ACK packets can have a further piece of information tagged on the end
- */
-struct rxrpc_ackinfo {
- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
- __be32 rwind; /* Rx window size (packets) [AFS 3.4] */
- __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
-};
-
-/*****************************************************************************/
-/*
- * Kerberos security type-2 challenge packet
- */
-struct rxkad_challenge {
- __be32 version; /* version of this challenge type */
- __be32 nonce; /* encrypted random number */
- __be32 min_level; /* minimum security level */
- __be32 __padding; /* padding to 8-byte boundary */
-} __packed;
-
-/*****************************************************************************/
-/*
- * Kerberos security type-2 response packet
- */
-struct rxkad_response {
- __be32 version; /* version of this response type */
- __be32 __pad;
-
- /* encrypted bit of the response */
- struct {
- __be32 epoch; /* current epoch */
- __be32 cid; /* parent connection ID */
- __be32 checksum; /* checksum */
- __be32 securityIndex; /* security type */
- __be32 call_id[4]; /* encrypted call IDs */
- __be32 inc_nonce; /* challenge nonce + 1 */
- __be32 level; /* desired level */
- } encrypted;
-
- __be32 kvno; /* Kerberos key version number */
- __be32 ticket_len; /* Kerberos ticket length */
-} __packed;
-
-/*****************************************************************************/
-/*
- * RxRPC-level abort codes
- */
-#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */
-#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */
-#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */
-#define RX_EOF -4 /* unexpected end of data on read op */
-#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */
-#define RX_USER_ABORT -6 /* generic user abort */
-#define RX_ADDRINUSE -7 /* UDP port in use */
-#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */
-
-/*
- * (un)marshalling abort codes (rxgen)
- */
-#define RXGEN_CC_MARSHAL -450
-#define RXGEN_CC_UNMARSHAL -451
-#define RXGEN_SS_MARSHAL -452
-#define RXGEN_SS_UNMARSHAL -453
-#define RXGEN_DECODE -454
-#define RXGEN_OPCODE -455
-#define RXGEN_SS_XDRFREE -456
-#define RXGEN_CC_XDRFREE -457
-
-/*
- * Rx kerberos security abort codes
- * - unfortunately we have no generalised security abort codes to say things
- * like "unsupported security", so we have to use these instead and hope the
- * other side understands
- */
-#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */
-#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */
-#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */
-#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */
-#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */
-#define RXKADNOAUTH 19270405 /* caller not authorised */
-#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */
-#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */
-#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */
-#define RXKADEXPIRED 19270409 /* authentication expired */
-#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */
-#define RXKADDATALEN 19270411 /* user data too long */
-#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */
-
-#endif /* _LINUX_RXRPC_PACKET_H */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a1266d318c85..6af198d8120b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -57,6 +57,7 @@ struct scsi_pointer {
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
+#define SCMD_ZONE_WRITE_LOCK (1 << 2)
struct scsi_cmnd {
struct scsi_request req;
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index 1df2ff61a4dd..0e495ed8872e 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -39,7 +39,7 @@ struct omap_hdmi_audio_ops {
/* HDMI audio initialization data */
struct omap_hdmi_audio_pdata {
struct device *dev;
- enum omapdss_version dss_version;
+ unsigned int version;
phys_addr_t audio_dma_addr;
const struct omap_hdmi_audio_ops *ops;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 9c94b97c17f8..c4a8b1947566 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
- /* pcm creation and destruction */
- int (*pcm_new)(struct snd_soc_pcm_runtime *);
- void (*pcm_free)(struct snd_pcm *);
-
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
@@ -874,8 +870,6 @@ struct snd_soc_component {
void (*remove)(struct snd_soc_component *);
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
- int (*pcm_new)(struct snd_soc_pcm_runtime *);
- void (*pcm_free)(struct snd_pcm *);
/* machine specific init */
int (*init)(struct snd_soc_component *component);
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0ca1fb08805b..fb87d32f5e51 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -786,6 +786,7 @@ struct iscsi_np {
int np_sock_type;
enum np_thread_state_table np_thread_state;
bool enabled;
+ atomic_t np_reset_count;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
new file mode 100644
index 000000000000..1bee3e7fdf32
--- /dev/null
+++ b/include/trace/events/bridge.h
@@ -0,0 +1,129 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bridge
+
+#if !defined(_TRACE_BRIDGE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BRIDGE_H
+
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+
+#include "../../../net/bridge/br_private.h"
+
+TRACE_EVENT(br_fdb_add,
+
+ TP_PROTO(struct ndmsg *ndm, struct net_device *dev,
+ const unsigned char *addr, u16 vid, u16 nlh_flags),
+
+ TP_ARGS(ndm, dev, addr, vid, nlh_flags),
+
+ TP_STRUCT__entry(
+ __field(u8, ndm_flags)
+ __string(dev, dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(u16, nlh_flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev, dev->name);
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->vid = vid;
+ __entry->nlh_flags = nlh_flags;
+ __entry->ndm_flags = ndm->ndm_flags;
+ ),
+
+ TP_printk("dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u nlh_flags %04x ndm_flags %02x",
+ __get_str(dev), __entry->addr[0], __entry->addr[1],
+ __entry->addr[2], __entry->addr[3], __entry->addr[4],
+ __entry->addr[5], __entry->vid,
+ __entry->nlh_flags, __entry->ndm_flags)
+);
+
+TRACE_EVENT(br_fdb_external_learn_add,
+
+ TP_PROTO(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid),
+
+ TP_ARGS(br, p, addr, vid),
+
+ TP_STRUCT__entry(
+ __string(br_dev, br->dev->name)
+ __string(dev, p ? p->dev->name : "null")
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(br_dev, br->dev->name);
+ __assign_str(dev, p ? p->dev->name : "null");
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->vid = vid;
+ ),
+
+ TP_printk("br_dev %s port %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
+ __get_str(br_dev), __get_str(dev), __entry->addr[0],
+ __entry->addr[1], __entry->addr[2], __entry->addr[3],
+ __entry->addr[4], __entry->addr[5], __entry->vid)
+);
+
+TRACE_EVENT(fdb_delete,
+
+ TP_PROTO(struct net_bridge *br, struct net_bridge_fdb_entry *f),
+
+ TP_ARGS(br, f),
+
+ TP_STRUCT__entry(
+ __string(br_dev, br->dev->name)
+ __string(dev, f->dst ? f->dst->dev->name : "null")
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(br_dev, br->dev->name);
+ __assign_str(dev, f->dst ? f->dst->dev->name : "null");
+ memcpy(__entry->addr, f->addr.addr, ETH_ALEN);
+ __entry->vid = f->vlan_id;
+ ),
+
+ TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
+ __get_str(br_dev), __get_str(dev), __entry->addr[0],
+ __entry->addr[1], __entry->addr[2], __entry->addr[3],
+ __entry->addr[4], __entry->addr[5], __entry->vid)
+);
+
+TRACE_EVENT(br_fdb_update,
+
+ TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid, bool added_by_user),
+
+ TP_ARGS(br, source, addr, vid, added_by_user),
+
+ TP_STRUCT__entry(
+ __string(br_dev, br->dev->name)
+ __string(dev, source->dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(bool, added_by_user)
+ ),
+
+ TP_fast_assign(
+ __assign_str(br_dev, br->dev->name);
+ __assign_str(dev, source->dev->name);
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->vid = vid;
+ __entry->added_by_user = added_by_user;
+ ),
+
+ TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+ __get_str(br_dev), __get_str(dev), __entry->addr[0],
+ __entry->addr[1], __entry->addr[2], __entry->addr[3],
+ __entry->addr[4], __entry->addr[5], __entry->vid,
+ __entry->added_by_user)
+);
+
+
+#endif /* _TRACE_BRIDGE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index dfae175ddebc..9c3bc3883d2f 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -937,21 +937,19 @@ TRACE_EVENT(ext4_alloc_da_blocks,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( unsigned int, data_blocks )
- __field( unsigned int, meta_blocks )
+ __field( unsigned int, data_blocks )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
- __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
),
- TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+ TP_printk("dev %d,%d ino %lu reserved_data_blocks %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->data_blocks, __entry->meta_blocks)
+ __entry->data_blocks)
);
TRACE_EVENT(ext4_mballoc_alloc,
@@ -1153,8 +1151,6 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__field( __u64, i_blocks )
__field( int, used_blocks )
__field( int, reserved_data_blocks )
- __field( int, reserved_meta_blocks )
- __field( int, allocated_meta_blocks )
__field( int, quota_claim )
__field( __u16, mode )
),
@@ -1166,22 +1162,16 @@ TRACE_EVENT(ext4_da_update_reserve_space,
__entry->used_blocks = used_blocks;
__entry->reserved_data_blocks =
EXT4_I(inode)->i_reserved_data_blocks;
- __entry->reserved_meta_blocks =
- EXT4_I(inode)->i_reserved_meta_blocks;
- __entry->allocated_meta_blocks =
- EXT4_I(inode)->i_allocated_meta_blocks;
__entry->quota_claim = quota_claim;
__entry->mode = inode->i_mode;
),
TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
- "reserved_data_blocks %d reserved_meta_blocks %d "
- "allocated_meta_blocks %d quota_claim %d",
+ "reserved_data_blocks %d quota_claim %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
__entry->used_blocks, __entry->reserved_data_blocks,
- __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
__entry->quota_claim)
);
@@ -1195,7 +1185,6 @@ TRACE_EVENT(ext4_da_reserve_space,
__field( ino_t, ino )
__field( __u64, i_blocks )
__field( int, reserved_data_blocks )
- __field( int, reserved_meta_blocks )
__field( __u16, mode )
),
@@ -1204,17 +1193,15 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->ino = inode->i_ino;
__entry->i_blocks = inode->i_blocks;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
- __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
__entry->mode = inode->i_mode;
),
TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
- "reserved_data_blocks %d reserved_meta_blocks %d",
+ "reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->reserved_data_blocks,
- __entry->reserved_meta_blocks)
+ __entry->reserved_data_blocks)
);
TRACE_EVENT(ext4_da_release_space,
@@ -1228,8 +1215,6 @@ TRACE_EVENT(ext4_da_release_space,
__field( __u64, i_blocks )
__field( int, freed_blocks )
__field( int, reserved_data_blocks )
- __field( int, reserved_meta_blocks )
- __field( int, allocated_meta_blocks )
__field( __u16, mode )
),
@@ -1239,19 +1224,15 @@ TRACE_EVENT(ext4_da_release_space,
__entry->i_blocks = inode->i_blocks;
__entry->freed_blocks = freed_blocks;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
- __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
- __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
__entry->mode = inode->i_mode;
),
TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
- "reserved_data_blocks %d reserved_meta_blocks %d "
- "allocated_meta_blocks %d",
+ "reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->freed_blocks, __entry->reserved_data_blocks,
- __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+ __entry->freed_blocks, __entry->reserved_data_blocks)
);
DECLARE_EVENT_CLASS(ext4__bitmap_load,
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 08bb3ed18dcc..fbc4a06f7310 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -190,8 +190,6 @@ DEFINE_EVENT(dax_pte_fault_class, name, \
DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
-DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
-DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
DEFINE_PTE_FAULT_EVENT(dax_load_hole);
TRACE_EVENT(dax_insert_mapping,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 8e50d01c645f..4c2e4737d7bc 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -125,12 +125,6 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1, "arch_1" }
#endif
-#if defined(CONFIG_X86)
-#define __VM_ARCH_SPECIFIC_2 {VM_MPX, "mpx" }
-#else
-#define __VM_ARCH_SPECIFIC_2 {VM_ARCH_2, "arch_2" }
-#endif
-
#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
@@ -162,7 +156,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
{VM_NORESERVE, "noreserve" }, \
{VM_HUGETLB, "hugetlb" }, \
__VM_ARCH_SPECIFIC_1 , \
- __VM_ARCH_SPECIFIC_2 , \
+ {VM_WIPEONFORK, "wipeonfork" }, \
{VM_DONTDUMP, "dontdump" }, \
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
new file mode 100644
index 000000000000..60d0d8bd336d
--- /dev/null
+++ b/include/trace/events/qdisc.h
@@ -0,0 +1,50 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qdisc
+
+#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QDISC_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+#include <linux/ftrace.h>
+
+TRACE_EVENT(qdisc_dequeue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
+ int packets, struct sk_buff *skb),
+
+ TP_ARGS(qdisc, txq, packets, skb),
+
+ TP_STRUCT__entry(
+ __field( struct Qdisc *, qdisc )
+ __field(const struct netdev_queue *, txq )
+ __field( int, packets )
+ __field( void *, skbaddr )
+ __field( int, ifindex )
+ __field( u32, handle )
+ __field( u32, parent )
+ __field( unsigned long, txq_state)
+ ),
+
+ /* skb==NULL indicates that zero packets were dequeued, even when packets==1 */
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->txq = txq;
+ __entry->packets = skb ? packets : 0;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ __entry->txq_state = txq->state;
+ ),
+
+ TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p",
+ __entry->ifindex, __entry->handle, __entry->parent,
+ __entry->txq_state, __entry->packets, __entry->skbaddr )
+);
+
+#endif /* _TRACE_QDISC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 91dc089d65b7..e91ae1f2290d 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -703,6 +703,7 @@ TRACE_EVENT(rcu_batch_end,
* at the beginning and end of the read, respectively. Note that the
* callback address can be NULL.
*/
+#define RCUTORTURENAME_LEN 8
TRACE_EVENT(rcu_torture_read,
TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
@@ -711,7 +712,7 @@ TRACE_EVENT(rcu_torture_read,
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
- __field(const char *, rcutorturename)
+ __field(char, rcutorturename[RCUTORTURENAME_LEN])
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
@@ -719,7 +720,9 @@ TRACE_EVENT(rcu_torture_read,
),
TP_fast_assign(
- __entry->rcutorturename = rcutorturename;
+ strncpy(__entry->rcutorturename, rcutorturename,
+ RCUTORTURENAME_LEN);
+ __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
__entry->rhp = rhp;
__entry->secs = secs;
__entry->c_old = c_old;
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 1b61357d3f57..862575ac8da9 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -12,7 +12,8 @@
FN(ABORTED) \
FN(DROP) \
FN(PASS) \
- FN(TX)
+ FN(TX) \
+ FN(REDIRECT)
#define __XDP_ACT_TP_FN(x) \
TRACE_DEFINE_ENUM(XDP_##x);
@@ -30,24 +31,119 @@ TRACE_EVENT(xdp_exception,
TP_ARGS(dev, xdp, act),
TP_STRUCT__entry(
- __string(name, dev->name)
- __array(u8, prog_tag, 8)
+ __field(int, prog_id)
__field(u32, act)
+ __field(int, ifindex)
),
TP_fast_assign(
- BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
- memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
- __assign_str(name, dev->name);
- __entry->act = act;
+ __entry->prog_id = xdp->aux->id;
+ __entry->act = act;
+ __entry->ifindex = dev->ifindex;
),
- TP_printk("prog=%s device=%s action=%s",
- __print_hex_str(__entry->prog_tag, 8),
- __get_str(name),
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
+ TP_printk("prog_id=%d action=%s ifindex=%d",
+ __entry->prog_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->ifindex)
);
+DECLARE_EVENT_CLASS(xdp_redirect_template,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp,
+ int to_ifindex, int err,
+ const struct bpf_map *map, u32 map_index),
+
+ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+
+ TP_STRUCT__entry(
+ __field(int, prog_id)
+ __field(u32, act)
+ __field(int, ifindex)
+ __field(int, err)
+ __field(int, to_ifindex)
+ __field(u32, map_id)
+ __field(int, map_index)
+ ),
+
+ TP_fast_assign(
+ __entry->prog_id = xdp->aux->id;
+ __entry->act = XDP_REDIRECT;
+ __entry->ifindex = dev->ifindex;
+ __entry->err = err;
+ __entry->to_ifindex = to_ifindex;
+ __entry->map_id = map ? map->id : 0;
+ __entry->map_index = map_index;
+ ),
+
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
+ __entry->prog_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->ifindex, __entry->to_ifindex,
+ __entry->err)
+);
+
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp,
+ int to_ifindex, int err,
+ const struct bpf_map *map, u32 map_index),
+ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+);
+
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp,
+ int to_ifindex, int err,
+ const struct bpf_map *map, u32 map_index),
+ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+);
+
+#define _trace_xdp_redirect(dev, xdp, to) \
+ trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
+
+#define _trace_xdp_redirect_err(dev, xdp, to, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
+
+DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp,
+ int to_ifindex, int err,
+ const struct bpf_map *map, u32 map_index),
+ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+ " map_id=%d map_index=%d",
+ __entry->prog_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->ifindex, __entry->to_ifindex,
+ __entry->err,
+ __entry->map_id, __entry->map_index)
+);
+
+DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp,
+ int to_ifindex, int err,
+ const struct bpf_map *map, u32 map_index),
+ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+ " map_id=%d map_index=%d",
+ __entry->prog_id,
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+ __entry->ifindex, __entry->to_ifindex,
+ __entry->err,
+ __entry->map_id, __entry->map_index)
+);
+
+#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
+ trace_xdp_redirect_map(dev, xdp, fwd ? fwd->ifindex : 0, \
+ 0, map, idx);
+
+#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
+ trace_xdp_redirect_map_err(dev, xdp, fwd ? fwd->ifindex : 0, \
+ err, map, idx);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
new file mode 100644
index 000000000000..e4732d3c2998
--- /dev/null
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_GENERIC_HUGETLB_ENCODE_H_
+#define _ASM_GENERIC_HUGETLB_ENCODE_H_
+
+/*
+ * Several system calls take a flag to request "hugetlb" huge pages.
+ * Without further specification, these system calls will use the
+ * system's default huge page size. If a system supports multiple
+ * huge page sizes, the desired huge page size can be specified in
+ * bits [26:31] of the flag arguments. The value in these 6 bits
+ * will encode the log2 of the huge page size.
+ *
+ * The following definitions are associated with this huge page size
+ * encoding in flag arguments. System call specific header files
+ * that use this encoding should include this file. They can then
+ * provide definitions based on these with their own specific prefix.
+ * For example:
+ * #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+ */
+
+#define HUGETLB_FLAG_ENCODE_SHIFT 26
+#define HUGETLB_FLAG_ENCODE_MASK 0x3f
+
+#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+
+#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
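For reference (not part of this series), a userspace sketch of the encoding the comment above describes; the helper name is invented and the include path for the new header is an assumption:

#define _GNU_SOURCE
#include <stdint.h>
#include <sys/mman.h>
#include <asm-generic/hugetlb_encode.h>	/* assumed uapi install path for the new header */

/* Hypothetical helper, not part of this patch: encode a power-of-two huge
 * page size (in bytes) into bits [26:31] of a flags argument as described
 * in the comment above.
 */
static inline int hugetlb_size_to_flag(uint64_t size)
{
	int log2sz = 0;

	while (size > 1) {	/* size is assumed to be a power of two */
		size >>= 1;
		log2sz++;
	}
	return log2sz << HUGETLB_FLAG_ENCODE_SHIFT;
}

/* Map 4 MB backed by explicit 2 MB huge pages: log2(2 MB) == 21, so the
 * computed flag equals HUGETLB_FLAG_ENCODE_2MB.  The caller should still
 * check for MAP_FAILED.
 */
static void *map_with_2mb_pages(void)
{
	return mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    hugetlb_size_to_flag(2 << 20), -1, 0);
}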
diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h
index 06d5f7ddf84e..14baf9f23a14 100644
--- a/include/uapi/asm-generic/ioctls.h
+++ b/include/uapi/asm-generic/ioctls.h
@@ -77,7 +77,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 8c27db0c5c08..203268f9231e 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -58,20 +58,12 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 9861be8da65e..e47c9e436221 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -104,4 +104,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
index 72e326f9c7de..0cb932416cfe 100644
--- a/include/uapi/drm/armada_drm.h
+++ b/include/uapi/drm/armada_drm.h
@@ -23,27 +23,27 @@ extern "C" {
DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
struct drm_armada_gem_create {
- uint32_t handle;
- uint32_t size;
+ __u32 handle;
+ __u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_CREATE \
ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
struct drm_armada_gem_mmap {
- uint32_t handle;
- uint32_t pad;
- uint64_t offset;
- uint64_t size;
- uint64_t addr;
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+ __u64 size;
+ __u64 addr;
};
#define DRM_IOCTL_ARMADA_GEM_MMAP \
ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
struct drm_armada_gem_pwrite {
- uint64_t ptr;
- uint32_t handle;
- uint32_t offset;
- uint32_t size;
+ __u64 ptr;
+ __u32 handle;
+ __u32 offset;
+ __u32 size;
};
#define DRM_IOCTL_ARMADA_GEM_PWRITE \
ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 101593ab10ac..97677cd6964d 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -700,6 +700,7 @@ struct drm_prime_handle {
struct drm_syncobj_create {
__u32 handle;
+#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
__u32 flags;
};
@@ -718,6 +719,24 @@ struct drm_syncobj_handle {
__u32 pad;
};
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+struct drm_syncobj_wait {
+ __u64 handles;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+struct drm_syncobj_array {
+ __u64 handles;
+ __u32 count_handles;
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
@@ -840,6 +859,9 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
+#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
+#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
+#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
/**
* Device specific ioctls should only be in their respective headers
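For orientation (illustrative, not part of the patch), waiting on a set of syncobj handles with the new ioctl might look like the sketch below, assuming libdrm's drmIoctl() wrapper and an open DRM fd:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(); also pulls in drm.h with the new structs */

/* Block until all handles signal or the absolute timeout (in ns) expires.
 * drmIoctl() retries EINTR/EAGAIN and returns 0 on success, -1 with errno
 * set otherwise.
 */
static int wait_all_syncobjs(int fd, const uint32_t *handles,
			     uint32_t count, int64_t timeout_nsec)
{
	struct drm_syncobj_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)handles;	/* user pointer passed as a u64 */
	wait.count_handles = count;
	wait.timeout_nsec = timeout_nsec;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

	return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}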
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 7586c46f68bf..3ad838d3f93f 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -185,6 +185,8 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
/* add more to the end as needed */
+#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
@@ -197,6 +199,15 @@ extern "C" {
*/
/*
+ * Invalid Modifier
+ *
+ * This modifier can be used as a sentinel to terminate the format modifiers
+ * list, or to initialize a variable with an invalid modifier. It might also be
+ * used to report an error back to userspace for certain APIs.
+ */
+#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
+
+/*
* Linear Layout
*
 * Just plain linear layout. Note that this is different from not specifying any
@@ -253,6 +264,26 @@ extern "C" {
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
/*
+ * Intel color control surface (CCS) for render compression
+ *
+ * The framebuffer format must be one of the 8:8:8:8 RGB formats.
+ * The main surface will be plane index 0 and must be Y/Yf-tiled,
+ * the CCS will be plane index 1.
+ *
+ * Each CCS tile matches a 1024x512 pixel area of the main surface.
+ * To match certain aspects of the 3D hardware the CCS is
+ * considered to be made up of normal 128Bx32 Y tiles; thus
+ * the CCS pitch must be specified in multiples of 128 bytes.
+ *
+ * In reality the CCS tile appears to be a 64Bx64 Y tile, composed
+ * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
+ * But that fact is not relevant unless the memory is accessed
+ * directly.
+ */
+#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
+#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 403339f98a92..54fc38c3c3f1 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -712,6 +712,56 @@ struct drm_mode_atomic {
__u64 user_data;
};
+struct drm_format_modifier_blob {
+#define FORMAT_BLOB_CURRENT 1
+ /* Version of this blob format */
+ __u32 version;
+
+ /* Flags */
+ __u32 flags;
+
+ /* Number of fourcc formats supported */
+ __u32 count_formats;
+
+ /* Where in this blob the formats exist (in bytes) */
+ __u32 formats_offset;
+
+ /* Number of drm_format_modifiers */
+ __u32 count_modifiers;
+
+ /* Where in this blob the modifiers exist (in bytes) */
+ __u32 modifiers_offset;
+
+ /* __u32 formats[] */
+ /* struct drm_format_modifier modifiers[] */
+};
+
+struct drm_format_modifier {
+ /* Bitmask of formats in the get_plane format list this info applies to.
+ * The offset field provides a sliding window selecting which 64 formats
+ * (bits) this mask covers.
+ *
+ * Some examples:
+ * With fewer than 65 formats, where formats 0 and 2 are supported:
+ * 0x0000000000000005
+ * ^-offset = 0, formats = 5
+ *
+ * If the number of formats grew to 128, and formats 98-101 are
+ * supported with the modifier:
+ *
+ * 0x0000003c00000000 0000000000000000
+ * ^
+ * |__offset = 64, formats = 0x3c00000000
+ *
+ */
+ __u64 formats;
+ __u32 offset;
+ __u32 pad;
+
+ /* The modifier that applies to the get_plane format list bitmask. */
+ __u64 modifier;
+};
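A hedged illustration (not from this patch) of how the sliding-window bitmask could be consumed in userspace; the function name is invented, and struct drm_format_modifier is the one defined above:

#include <stdbool.h>
#include <stdint.h>
#include <drm/drm_mode.h>	/* struct drm_format_modifier, as patched above */

/* Returns true if 'entry' declares support for the format at index
 * 'format_idx' in the plane's GETPLANE format list.  Indices below the
 * window start, or 64 or more slots past it, are simply not covered by
 * this entry.
 */
static bool modifier_covers_format(const struct drm_format_modifier *entry,
				   uint32_t format_idx)
{
	if (format_idx < entry->offset || format_idx >= entry->offset + 64)
		return false;

	return entry->formats & (1ULL << (format_idx - entry->offset));
}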
+
/**
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 7ccbd6a2bbe0..6598fb76d2c2 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_I915_PERF_OPEN 0x36
+#define DRM_I915_PERF_ADD_CONFIG 0x37
+#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
+#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
+#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
+struct drm_i915_gem_exec_fence {
+ /**
+ * User's handle for a drm_syncobj to wait on or signal.
+ */
+ __u32 handle;
+
+#define I915_EXEC_FENCE_WAIT (1<<0)
+#define I915_EXEC_FENCE_SIGNAL (1<<1)
+ __u32 flags;
+};
+
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
@@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 {
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
- /** This is a struct drm_clip_rect *cliprects */
+ /**
+ * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
+ * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
+ * struct drm_i915_gem_exec_fence *fences.
+ */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
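A hedged sketch (not in the series) of the fence-array path documented above: the cliprects fields are reused to carry drm_i915_gem_exec_fence entries and I915_EXEC_FENCE_ARRAY is set; the rest of the execbuffer setup is elided.

#include <stdint.h>
#include <xf86drm.h>		/* drmIoctl(); libdrm assumed */
#include <drm/i915_drm.h>	/* as patched in this diff; install path assumed */

/* 'execbuf' is assumed to be otherwise fully populated (buffers, batch, ring).
 * Waits on wait_handle before execution and signals signal_handle afterwards.
 */
static int execbuf_with_fences(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
			       uint32_t wait_handle, uint32_t signal_handle)
{
	struct drm_i915_gem_exec_fence fences[2] = {
		{ .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT   },
		{ .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
	};

	execbuf->cliprects_ptr = (uintptr_t)fences;	/* fence array, not cliprects */
	execbuf->num_cliprects = 2;
	execbuf->flags |= I915_EXEC_FENCE_ARRAY;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}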
@@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 {
* element).
*/
#define I915_EXEC_BATCH_FIRST (1<<18)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1))
+
+/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
+ * define an array of struct drm_i915_gem_exec_fence entries which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY (1<<19)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
+/**
+ * Structure to upload perf dynamic configuration into the kernel.
+ */
+struct drm_i915_perf_oa_config {
+ /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ char uuid[36];
+
+ __u32 n_mux_regs;
+ __u32 n_boolean_regs;
+ __u32 n_flex_regs;
+
+ __u64 __user mux_regs_ptr;
+ __u64 __user boolean_regs_ptr;
+ __u64 __user flex_regs_ptr;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 26c54f6d595d..ad4eb2863e70 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 __user relocs; /* in, ptr to array of submit_reloc's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
- __u64 __user bos; /* in, ptr to array of submit_bo's */
- __u64 __user cmds; /* in, ptr to array of submit_cmd's */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
index 7eef42213051..880999d2d863 100644
--- a/include/uapi/drm/qxl_drm.h
+++ b/include/uapi/drm/qxl_drm.h
@@ -80,8 +80,8 @@ struct drm_qxl_reloc {
};
struct drm_qxl_command {
- __u64 __user command; /* void* */
- __u64 __user relocs; /* struct drm_qxl_reloc* */
+ __u64 command; /* void* */
+ __u64 relocs; /* struct drm_qxl_reloc* */
__u32 type;
__u32 command_size;
__u32 relocs_num;
@@ -91,7 +91,7 @@ struct drm_qxl_command {
struct drm_qxl_execbuffer {
__u32 flags; /* for future use */
__u32 commands_num;
- __u64 __user commands; /* struct drm_qxl_command* */
+ __u64 commands; /* struct drm_qxl_command* */
};
struct drm_qxl_update_area {
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index 6ac4c5c014cb..afae87004963 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VC4_GET_PARAM 0x07
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
+#define DRM_VC4_LABEL_BO 0x0a
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -51,6 +52,7 @@ extern "C" {
#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
+#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -153,6 +155,16 @@ struct drm_vc4_submit_cl {
__u32 pad:24;
#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
+/* By default, the kernel gets to choose the order that the tiles are
+ * rendered in. If this is set, then the tiles will be rendered in a
+ * raster order, with the right-to-left vs left-to-right and
+ * top-to-bottom vs bottom-to-top dictated by
+ * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
+ * blits to be implemented using the 3D engine.
+ */
+#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
+#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
+#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
__u32 flags;
/* Returned value of the seqno of this render job (for the
@@ -292,6 +304,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
+#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
struct drm_vc4_get_param {
__u32 param;
@@ -311,6 +324,15 @@ struct drm_vc4_set_tiling {
__u64 modifier;
};
+/**
+ * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
+ */
+struct drm_vc4_label_bo {
+ __u32 handle;
+ __u32 len;
+ __u64 name;
+};
+
#if defined(__cplusplus)
}
#endif
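A usage sketch, not part of the series: a submitter wanting the deterministic raster order enabled by the VC4_SUBMIT_CL_FIXED_RCL_ORDER flags added earlier in this header could first confirm DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER via GET_PARAM and then set the flags before the SUBMIT_CL ioctl; drmIoctl() and the include paths here are assumptions.

#include <xf86drm.h>		/* drmIoctl(); libdrm assumed */
#include <drm/vc4_drm.h>	/* header patched above; install path assumed */

/* Illustrative only: 'submit' is assumed to be otherwise fully populated. */
static int submit_in_raster_order(int fd, struct drm_vc4_submit_cl *submit)
{
	submit->flags |= VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			 VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y;

	return drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit);
}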
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index d9dfde9aa757..0bc784f5e0db 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -297,13 +297,17 @@ union drm_vmw_surface_reference_arg {
* @version: Allows expanding the execbuf ioctl parameters without breaking
* backwards compatibility, since user-space will always tell the kernel
* which version it uses.
- * @flags: Execbuf flags. None currently.
+ * @flags: Execbuf flags.
+ * @imported_fence_fd: FD for a fence imported from another device
*
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
#define DRM_VMW_EXECBUF_VERSION 2
+#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
+#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
+
struct drm_vmw_execbuf_arg {
__u64 commands;
__u32 command_size;
@@ -312,7 +316,7 @@ struct drm_vmw_execbuf_arg {
__u32 version;
__u32 flags;
__u32 context_handle;
- __u32 pad64;
+ __s32 imported_fence_fd;
};
/**
@@ -328,6 +332,7 @@ struct drm_vmw_execbuf_arg {
* @passed_seqno: The highest seqno number processed by the hardware
* so far. This can be used to mark user-space fence objects as signaled, and
* to determine whether a fence seqno might be stale.
+ * @fd: FD associated with the fence, -1 if not exported
* @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
* error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -345,7 +350,7 @@ struct drm_vmw_fence_rep {
__u32 mask;
__u32 seqno;
__u32 passed_seqno;
- __u32 pad64;
+ __s32 fd;
__s32 error;
};
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index a2d4a8ac94ca..a04adbc70ddf 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -28,6 +28,7 @@
#define __LINUX__AIO_ABI_H
#include <linux/types.h>
+#include <linux/fs.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -62,14 +63,6 @@ struct io_event {
__s64 res2; /* secondary result */
};
-#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
-#define PADDED(x,y) x, y
-#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
-#define PADDED(x,y) y, x
-#else
-#error edit for your odd byteorder.
-#endif
-
/*
* we always use a 64bit off_t when communicating
 * with userland. It's up to libraries to do the
@@ -79,8 +72,16 @@ struct io_event {
struct iocb {
/* these are internal to the kernel/libc. */
__u64 aio_data; /* data to be returned in event's data */
- __u32 PADDED(aio_key, aio_rw_flags);
- /* the kernel sets aio_key to the req # */
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+ __u32 aio_key; /* the kernel sets aio_key to the req # */
+ __kernel_rwf_t aio_rw_flags; /* RWF_* flags */
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
+ __kernel_rwf_t aio_rw_flags; /* RWF_* flags */
+ __u32 aio_key; /* the kernel sets aio_key to the req # */
+#else
+#error edit for your odd byteorder.
+#endif
/* common fields */
__u16 aio_lio_opcode; /* see IOCB_CMD_ above */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 51f891fb1b18..84a9a0944e13 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -132,6 +132,7 @@ enum {
/* struct binder_fd_array_object - object describing an array of fds in a buffer
* @hdr: common header structure
+ * @pad: padding to ensure correct alignment
* @num_fds: number of file descriptors in the buffer
* @parent: index in offset array to buffer holding the fd array
* @parent_offset: start offset of fd array in the buffer
@@ -152,6 +153,7 @@ enum {
*/
struct binder_fd_array_object {
struct binder_object_header hdr;
+ __u32 pad;
binder_size_t num_fds;
binder_size_t parent;
binder_size_t parent_offset;
@@ -184,6 +186,19 @@ struct binder_version {
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO: the driver reads ptr and writes all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call, passing the previously returned value, to get the next
+ * node. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ __u32 has_strong_ref;
+ __u32 has_weak_ref;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -191,6 +206,7 @@ struct binder_version {
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
/*
* NOTE: Two special error codes you should check for when calling
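A rough userspace sketch (not in the patch) of the iteration the new comment describes, assuming fd is an open binder fd and the patched header is installed as <linux/android/binder.h>:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>	/* assumed uapi install path */

static void dump_binder_nodes(int fd)
{
	struct binder_node_debug_info info;

	memset(&info, 0, sizeof(info));		/* ptr == 0 selects the first node */

	do {
		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		if (!info.ptr)			/* 0 means no more nodes */
			break;
		printf("node %llx cookie %llx strong %u weak %u\n",
		       (unsigned long long)info.ptr,
		       (unsigned long long)info.cookie,
		       info.has_strong_ref, info.has_weak_ref);
	} while (1);
}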
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e99e3e6f8b37..ba848b761cfb 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -30,9 +30,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
+/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
+#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
+#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
+#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
+#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@@ -104,6 +109,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
+ BPF_MAP_TYPE_DEVMAP,
+ BPF_MAP_TYPE_SOCKMAP,
};
enum bpf_prog_type {
@@ -121,6 +128,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
+ BPF_PROG_TYPE_SK_SKB,
};
enum bpf_attach_type {
@@ -128,6 +136,8 @@ enum bpf_attach_type {
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
+ BPF_SK_SKB_STREAM_PARSER,
+ BPF_SK_SKB_STREAM_VERDICT,
__MAX_BPF_ATTACH_TYPE
};
@@ -153,6 +163,7 @@ enum bpf_attach_type {
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
@@ -161,6 +172,8 @@ enum bpf_attach_type {
* across different LRU lists.
*/
#define BPF_F_NO_COMMON_LRU (1U << 1)
+/* Specify numa node during map creation */
+#define BPF_F_NUMA_NODE (1U << 2)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
@@ -168,8 +181,13 @@ union bpf_attr {
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
- __u32 map_flags; /* prealloc or not */
+ __u32 map_flags; /* BPF_MAP_CREATE related
+ * flags defined above.
+ */
__u32 inner_map_fd; /* fd pointing to the inner map */
+ __u32 numa_node; /* numa node (effective only if
+ * BPF_F_NUMA_NODE is set).
+ */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -344,9 +362,20 @@ union bpf_attr {
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
* @ifindex: ifindex of the net device
- * @flags: bit 0 - if set, redirect to ingress instead of egress
- * other bits - reserved
- * Return: TC_ACT_REDIRECT
+ * @flags:
+ * cls_bpf:
+ * bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * xdp_bpf:
+ * all bits - reserved
+ * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
+ * xdp_bpf: XDP_REDIRECT on success or XDP_ABORTED on error
+ * int bpf_redirect_map(map, key, flags)
+ * redirect to endpoint in map
+ * @map: pointer to dev map
+ * @key: index in map to lookup
+ * @flags: --
+ * Return: XDP_REDIRECT on success or XDP_ABORTED on error
*
* u32 bpf_get_route_realm(skb)
* retrieve a dst's tclassid
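To make the redirect-to-map flow above concrete, here is a minimal XDP sketch in the style of samples/bpf (not part of this patch); bpf_helpers.h, the map population and the loader are all assumed:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC(), struct bpf_map_def, helper stubs (samples/bpf) */

struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),	/* value = target ifindex */
	.max_entries	= 1,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	int key = 0;	/* forward everything through slot 0 in this sketch */

	/* XDP_REDIRECT on success, XDP_ABORTED on error */
	return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";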
@@ -539,6 +568,20 @@ union bpf_attr {
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
+ *
+ * int bpf_sk_redirect_map(map, key, flags)
+ * Redirect skb to a sock in map using key as a lookup key for the
+ * sock in map.
+ * @map: pointer to sockmap
+ * @key: key to lookup sock in map
+ * @flags: reserved for future use
+ * Return: SK_REDIRECT
+ *
+ * int bpf_sock_map_update(skops, map, key, flags)
+ * @skops: pointer to bpf_sock_ops
+ * @map: pointer to sockmap to update
+ * @key: key to insert/update sock in map
+ * @flags: same flags as map update elem
*/
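Similarly, a hedged sketch of the sockmap pair described above: a BPF_PROG_TYPE_SOCK_OPS program would insert established sockets with bpf_sock_map_update(), and an SK_SKB verdict program like the one below (the section name and map are invented) steers matching skbs with bpf_sk_redirect_map():

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC() and helper stubs, as in samples/bpf */

struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 16,
};

SEC("sk_skb_verdict")
int prog_verdict(struct __sk_buff *skb)
{
	int key = 0;	/* single fixed slot for this sketch */

	/* SK_REDIRECT steers the skb to the socket stored at 'key';
	 * returning SK_DROP would discard it instead.
	 */
	return bpf_sk_redirect_map(&sock_map, key, 0);
}

char _license[] SEC("license") = "GPL";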
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -591,7 +634,10 @@ union bpf_attr {
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
- FN(skb_adjust_room),
+ FN(skb_adjust_room), \
+ FN(redirect_map), \
+ FN(sk_redirect_map), \
+ FN(sock_map_update), \
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -668,6 +714,15 @@ struct __sk_buff {
__u32 data;
__u32 data_end;
__u32 napi_id;
+
+ /* Accessed by BPF_PROG_TYPE_SK_SKB programs */
+ __u32 family;
+ __u32 remote_ip4; /* Stored in network byte order */
+ __u32 local_ip4; /* Stored in network byte order */
+ __u32 remote_ip6[4]; /* Stored in network byte order */
+ __u32 local_ip6[4]; /* Stored in network byte order */
+ __u32 remote_port; /* Stored in network byte order */
+ __u32 local_port; /* Stored in host byte order */
};
struct bpf_tunnel_key {
@@ -703,6 +758,8 @@ struct bpf_sock {
__u32 family;
__u32 type;
__u32 protocol;
+ __u32 mark;
+ __u32 priority;
};
#define XDP_PACKET_HEADROOM 256
@@ -717,6 +774,7 @@ enum xdp_action {
XDP_DROP,
XDP_PASS,
XDP_TX,
+ XDP_REDIRECT,
};
/* user accessible metadata for XDP packet hook
@@ -727,6 +785,12 @@ struct xdp_md {
__u32 data_end;
};
+enum sk_action {
+ SK_ABORTED = 0,
+ SK_DROP,
+ SK_REDIRECT,
+};
+
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index b0e807ac53bb..0cbca96c66b9 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -226,4 +226,22 @@ enum devlink_dpipe_action_type {
DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY,
};
+enum devlink_dpipe_field_ethernet_id {
+ DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
+};
+
+enum devlink_dpipe_field_ipv4_id {
+ DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
+};
+
+enum devlink_dpipe_field_ipv6_id {
+ DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
+};
+
+enum devlink_dpipe_header_id {
+ DEVLINK_DPIPE_HEADER_ETHERNET,
+ DEVLINK_DPIPE_HEADER_IPV4,
+ DEVLINK_DPIPE_HEADER_IPV6,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/dlm_netlink.h b/include/uapi/linux/dlm_netlink.h
index 647c8ef27227..ef1e2e08769a 100644
--- a/include/uapi/linux/dlm_netlink.h
+++ b/include/uapi/linux/dlm_netlink.h
@@ -10,6 +10,7 @@
#define _DLM_NETLINK_H
#include <linux/types.h>
+#include <linux/dlmconstants.h>
enum {
DLM_STATUS_WAITING = 1,
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index 07bdce1f444a..78fdf52d6b2f 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -18,10 +18,13 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_ICMP 2
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TXSTATUS 4
+#define SO_EE_ORIGIN_ZEROCOPY 5
#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
+#define SO_EE_CODE_ZEROCOPY_COPIED 1
+
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 7d4a594d5d58..9c041dae8e2c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1238,6 +1238,47 @@ struct ethtool_per_queue_op {
char data[];
};
+/**
+ * struct ethtool_fecparam - Ethernet forward error correction (FEC) parameters
+ * @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
+ * @active_fec: FEC mode which is active on the port
+ * @fec: Bitmask of supported/configured FEC modes
+ * @reserved: Reserved for future extensions, e.g. a FEC bypass feature
+ *
+ * Drivers should reject a non-zero setting of @autoneg when
+ * autonegotiation is disabled (or not supported) for the link.
+ *
+ */
+struct ethtool_fecparam {
+ __u32 cmd;
+ /* bitmask of FEC modes */
+ __u32 active_fec;
+ __u32 fec;
+ __u32 reserved;
+};
+
+/**
+ * enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
+ * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported
+ * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver
+ * @ETHTOOL_FEC_OFF: No FEC Mode
+ * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Correction mode
+ * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Correction mode
+ */
+enum ethtool_fec_config_bits {
+ ETHTOOL_FEC_NONE_BIT,
+ ETHTOOL_FEC_AUTO_BIT,
+ ETHTOOL_FEC_OFF_BIT,
+ ETHTOOL_FEC_RS_BIT,
+ ETHTOOL_FEC_BASER_BIT,
+};
+
+#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
+#define ETHTOOL_FEC_AUTO (1 << ETHTOOL_FEC_AUTO_BIT)
+#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
+#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
+#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
+
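An illustrative userspace read of the new FEC parameters (not from the patch), via the usual SIOCETHTOOL/ifreq plumbing; error handling is left to the caller:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>	/* struct ethtool_fecparam, ETHTOOL_GFECPARAM */
#include <linux/sockios.h>	/* SIOCETHTOOL */

/* Fills *fec for the given interface; returns the ioctl() result (0 on
 * success).  'fd' must be any AF_INET (or similar) socket.
 */
static int get_fec(int fd, const char *ifname, struct ethtool_fecparam *fec)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	memset(fec, 0, sizeof(*fec));
	fec->cmd = ETHTOOL_GFECPARAM;
	ifr.ifr_data = (void *)fec;

	return ioctl(fd, SIOCETHTOOL, &ifr);
}

/* On success, fec->active_fec holds one of the ETHTOOL_FEC_* bits (e.g.
 * ETHTOOL_FEC_RS) and fec->fec the bitmask of supported/configured modes.
 */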
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
* Please use ETHTOOL_GLINKSETTINGS
@@ -1330,6 +1371,8 @@ struct ethtool_per_queue_op {
#define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */
#define ETHTOOL_PHY_GTUNABLE 0x0000004e /* Get PHY tunable configuration */
#define ETHTOOL_PHY_STUNABLE 0x0000004f /* Set PHY tunable configuration */
+#define ETHTOOL_GFECPARAM 0x00000050 /* Get FEC settings */
+#define ETHTOOL_SFECPARAM 0x00000051 /* Set FEC settings */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -1387,6 +1430,9 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
+ ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1395,7 +1441,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index b7495d05e8de..56235dddea7d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -358,13 +358,25 @@ struct fscrypt_key {
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
-/* flags for preadv2/pwritev2: */
-#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
-#define RWF_DSYNC 0x00000002 /* per-IO O_DSYNC */
-#define RWF_SYNC 0x00000004 /* per-IO O_SYNC */
-#define RWF_NOWAIT 0x00000008 /* per-IO, return -EAGAIN if operation would block */
-
-#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC |\
- RWF_NOWAIT)
+/*
+ * Flags for preadv2/pwritev2:
+ */
+
+typedef int __bitwise __kernel_rwf_t;
+
+/* high priority request, poll if possible */
+#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001)
+
+/* per-IO O_DSYNC */
+#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002)
+
+/* per-IO O_SYNC */
+#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004)
+
+/* per-IO, return -EAGAIN if operation would block */
+#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+
+/* mask of flags supported by the kernel */
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
#endif /* _UAPI_LINUX_FS_H */
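For context (not part of the patch): these flags are consumed through preadv2()/pwritev2(); with a sufficiently new glibc the wrapper below is all it takes to issue a per-IO O_DSYNC write.

#define _GNU_SOURCE
#include <sys/uio.h>	/* pwritev2() and the RWF_* constants (glibc >= 2.26 assumed) */

/* Write 'len' bytes at 'off' with per-IO O_DSYNC semantics.  Returns the
 * byte count, or -1 with errno set (EOPNOTSUPP if the flag is not supported
 * by the kernel or filesystem).
 */
static ssize_t write_dsync(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, off, RWF_DSYNC);
}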
diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h
index 7e8e5f0bd6d2..e5213c3e38b2 100644
--- a/include/uapi/linux/fsmap.h
+++ b/include/uapi/linux/fsmap.h
@@ -96,7 +96,7 @@ fsmap_advance(
#define FMR_OF_EXTENT_MAP 0x4 /* segment = extent map */
#define FMR_OF_SHARED 0x8 /* segment = shared with another file */
#define FMR_OF_SPECIAL_OWNER 0x10 /* owner is a special value */
-#define FMR_OF_LAST 0x20 /* segment is the last in the FS */
+#define FMR_OF_LAST 0x20 /* segment is the last in the dataset */
/* Each FS gets to define its own special owner codes. */
#define FMR_OWNER(type, code) (((__u64)type << 32) | \
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index cf73510b9238..a2a635620600 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -59,6 +59,7 @@
#define ARPHRD_LAPB 516 /* LAPB */
#define ARPHRD_DDCMP 517 /* Digital's DDCMP protocol */
#define ARPHRD_RAWHDLC 518 /* Raw HDLC */
+#define ARPHRD_RAWIP 519 /* Raw IP */
#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
#define ARPHRD_TUNNEL6 769 /* IP6IP6 tunnel */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 5bc9bfd816b7..9037065e23d0 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -66,6 +66,7 @@
#define ETH_P_ATALK 0x809B /* Appletalk DDP */
#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
+#define ETH_P_ERSPAN 0x88BE /* ERSPAN type II */
#define ETH_P_IPX 0x8137 /* IPX over DIX */
#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */
@@ -98,11 +99,13 @@
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */
#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */
+#define ETH_P_NSH 0x894F /* Network Service Header */
#define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
@@ -137,6 +140,9 @@
#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */
+#define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and
+ * aggregation protocol
+ */
/*
* This is an Ethernet frame header.
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 6792d1967d31..2e520883c054 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -134,6 +134,7 @@ enum {
IFLA_GRE_COLLECT_METADATA,
IFLA_GRE_IGNORE_DF,
IFLA_GRE_FWMARK,
+ IFLA_GRE_ERSPAN_INDEX,
__IFLA_GRE_MAX,
};
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index bbe201047df6..f52ff62bfabe 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -142,6 +142,8 @@ enum {
INET_DIAG_PAD,
INET_DIAG_MARK,
INET_DIAG_BBRINFO,
+ INET_DIAG_CLASS_ID,
+ INET_DIAG_MD5SIG,
__INET_DIAG_MAX,
};
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index d6833426fdef..7b4567bacfc2 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -232,6 +232,35 @@ struct kfd_ioctl_wait_events_args {
uint32_t wait_result; /* from KFD */
};
+struct kfd_ioctl_set_scratch_backing_va_args {
+ uint64_t va_addr; /* to KFD */
+ uint32_t gpu_id; /* to KFD */
+ uint32_t pad;
+};
+
+struct kfd_ioctl_get_tile_config_args {
+ /* to KFD: pointer to tile array */
+ uint64_t tile_config_ptr;
+ /* to KFD: pointer to macro tile array */
+ uint64_t macro_tile_config_ptr;
+ /* to KFD: array size allocated by user mode
+ * from KFD: array size filled by kernel
+ */
+ uint32_t num_tile_configs;
+ /* to KFD: array size allocated by user mode
+ * from KFD: array size filled by kernel
+ */
+ uint32_t num_macro_tile_configs;
+
+ uint32_t gpu_id; /* to KFD */
+ uint32_t gb_addr_config; /* from KFD */
+ uint32_t num_banks; /* from KFD */
+ uint32_t num_ranks; /* from KFD */
+ /* struct size can be extended later if needed
+ * without breaking ABI compatibility
+ */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -286,7 +315,13 @@ struct kfd_ioctl_wait_events_args {
#define AMDKFD_IOC_DBG_WAVE_CONTROL \
AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
+#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
+ AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
+
+#define AMDKFD_IOC_GET_TILE_CONFIG \
+ AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x11
+#define AMDKFD_COMMAND_END 0x13
#endif
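Purely to illustrate the to-KFD/from-KFD convention of the new ioctl (not part of the patch): a rough sketch, assuming /dev/kfd is present and that a valid gpu_id, normally discovered from the KFD topology in sysfs, replaces the 0 placeholder.

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
        uint32_t tiles[64], macro_tiles[16];    /* capacities chosen arbitrarily */
        struct kfd_ioctl_get_tile_config_args args = {
                .tile_config_ptr = (uintptr_t)tiles,
                .macro_tile_config_ptr = (uintptr_t)macro_tiles,
                .num_tile_configs = 64,         /* to KFD: capacity of tiles[] */
                .num_macro_tile_configs = 16,   /* to KFD: capacity of macro_tiles[] */
                .gpu_id = 0,                    /* placeholder: use a real gpu_id from the KFD topology */
        };
        int fd = open("/dev/kfd", O_RDWR);

        if (fd < 0) {
                perror("open /dev/kfd");
                return 1;
        }
        if (ioctl(fd, AMDKFD_IOC_GET_TILE_CONFIG, &args) < 0) {
                perror("AMDKFD_IOC_GET_TILE_CONFIG");
                close(fd);
                return 1;
        }
        /* from KFD: the counts now hold the number of valid entries */
        printf("gb_addr_config=0x%x banks=%u ranks=%u tiles=%u macro_tiles=%u\n",
               args.gb_addr_config, args.num_banks, args.num_ranks,
               args.num_tile_configs, args.num_macro_tile_configs);
        close(fd);
        return 0;
}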
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index a3960f98679c..c8125ec1f4f2 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -22,7 +22,6 @@ enum {
LO_FLAGS_AUTOCLEAR = 4,
LO_FLAGS_PARTSCAN = 8,
LO_FLAGS_DIRECT_IO = 16,
- LO_FLAGS_BLOCKSIZE = 32,
};
#include <asm/posix_types.h> /* for __kernel_old_dev_t */
@@ -60,8 +59,6 @@ struct loop_info64 {
__u64 lo_init[2];
};
-#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
-
/*
* Loop filter types
*/
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 92724cba1eba..7fdd19ca7511 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -11,6 +11,7 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_IP6,
LWTUNNEL_ENCAP_SEG6,
LWTUNNEL_ENCAP_BPF,
+ LWTUNNEL_ENCAP_SEG6_LOCAL,
__LWTUNNEL_ENCAP_MAX,
};
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index e0b108bd2624..6d47b3249d8a 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -40,14 +40,33 @@
* (non-running threads are de facto in such a
* state). This covers threads from all processes
* running on the system. This command returns 0.
+ * @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
+ * Execute a memory barrier on each running
+ * thread belonging to the same process as the current
+ * thread. Upon return from system call, the
+ * caller thread is ensured that all its running
+ * thread siblings have passed through a state
+ * where all memory accesses to user-space
+ * addresses match program order between entry
+ * to and return from the system call
+ * (non-running threads are de facto in such a
+ * state). This only covers threads from the
+ * same process as the caller thread. This
+ * command returns 0. The "expedited" commands
+ * complete faster than the non-expedited ones;
+ * they never block, but have the downside of
+ * causing extra overhead.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
* the value 0.
*/
enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+ /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
+ /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
};
#endif /* _UAPI_LINUX_MEMBARRIER_H */
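As a usage illustration rather than part of the patch: query for the new command, then issue it, going through syscall(2) since no libc wrapper is assumed.

#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_membarrier(int cmd, int flags)
{
        return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
        int supported = sys_membarrier(MEMBARRIER_CMD_QUERY, 0);

        if (supported < 0) {
                perror("membarrier");
                return 1;
        }
        if (!(supported & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
                printf("private expedited membarrier not supported\n");
                return 0;
        }
        /* Expedited barrier across the running threads of this process. */
        if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) == 0)
                printf("private expedited membarrier executed\n");
        else
                perror("MEMBARRIER_CMD_PRIVATE_EXPEDITED");
        return 0;
}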
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 534e364bda92..7f3a722dbd72 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -1,8 +1,32 @@
#ifndef _UAPI_LINUX_MEMFD_H
#define _UAPI_LINUX_MEMFD_H
+#include <asm-generic/hugetlb_encode.h>
+
/* flags for memfd_create(2) (unsigned int) */
#define MFD_CLOEXEC 0x0001U
#define MFD_ALLOW_SEALING 0x0002U
+#define MFD_HUGETLB 0x0004U
+
+/*
+ * Huge page size encoding when MFD_HUGETLB is specified, and a huge page
+ * size other than the default is desired. See hugetlb_encode.h.
+ * All known huge page size encodings are provided here. It is the
+ * responsibility of the application to know which sizes are supported on
+ * the running system. See mmap(2) man page for details.
+ */
+#define MFD_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+#define MFD_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+
+#define MFD_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
+#define MFD_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
+#define MFD_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
+#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
+#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
+#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
+#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
+#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
#endif /* _UAPI_LINUX_MEMFD_H */
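For illustration only: creating a hugetlb-backed memfd with the new flags, assuming 2MB huge pages are configured on the running system; memfd_create(2) is invoked via syscall(2) in case the libc lacks a wrapper.

#include <linux/memfd.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int memfd_create_huge(const char *name, unsigned int flags)
{
        return syscall(__NR_memfd_create, name, flags);
}

int main(void)
{
        /* Back the memfd with 2MB huge pages; the chosen size must be
         * supported and available on the running system. */
        int fd = memfd_create_huge("huge-example",
                                   MFD_CLOEXEC | MFD_HUGETLB | MFD_HUGE_2MB);

        if (fd < 0) {
                perror("memfd_create");
                return 1;
        }
        printf("hugetlb-backed memfd: fd=%d\n", fd);
        close(fd);
        return 0;
}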
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index ade4acd3a90c..a937480d7cd3 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -2,6 +2,7 @@
#define _UAPI_LINUX_MMAN_H
#include <asm/mman.h>
+#include <asm-generic/hugetlb_encode.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
@@ -10,4 +11,25 @@
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER 2
+/*
+ * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
+ * size other than the default is desired. See hugetlb_encode.h.
+ * All known huge page size encodings are provided here. It is the
+ * responsibility of the application to know which sizes are supported on
+ * the running system. See mmap(2) man page for details.
+ */
+#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+
+#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
+#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
+#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
+#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
+#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
+#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
+#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
+#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
+
#endif /* _UAPI_LINUX_MMAN_H */
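For illustration only: an anonymous 2MB-huge-page mapping using the encoding above; the numeric fallbacks mirror hugetlb_encode.h (shift 26, value is log2 of the page size) and the generic MAP_HUGETLB value, and are only needed when the libc headers predate these definitions.

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)     /* log2(2MB) == 21 */
#endif

int main(void)
{
        size_t len = 2UL * 1024 * 1024;
        /* Explicitly request 2MB huge pages; this fails unless such pages
         * are configured (e.g. via /proc/sys/vm/nr_hugepages). */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
                       -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB | MAP_HUGE_2MB)");
                return 1;
        }
        printf("2MB-huge-page mapping at %p\n", p);
        munmap(p, len);
        return 0;
}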
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 6d3c54264d8e..3f03567631cb 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -145,43 +145,6 @@ struct nd_cmd_clear_error {
__u64 cleared;
} __packed;
-struct nd_cmd_trans_spa {
- __u64 spa;
- __u32 status;
- __u8 flags;
- __u8 _reserved[3];
- __u64 trans_length;
- __u32 num_nvdimms;
- struct nd_nvdimm_device {
- __u32 nfit_device_handle;
- __u32 _reserved;
- __u64 dpa;
- } __packed devices[0];
-
-} __packed;
-
-struct nd_cmd_ars_err_inj {
- __u64 err_inj_spa_range_base;
- __u64 err_inj_spa_range_length;
- __u8 err_inj_options;
- __u32 status;
-} __packed;
-
-struct nd_cmd_ars_err_inj_clr {
- __u64 err_inj_clr_spa_range_base;
- __u64 err_inj_clr_spa_range_length;
- __u32 status;
-} __packed;
-
-struct nd_cmd_ars_err_inj_stat {
- __u32 status;
- __u32 inj_err_rec_count;
- struct nd_error_stat_query_record {
- __u64 err_inj_stat_spa_range_base;
- __u64 err_inj_stat_spa_range_length;
- } __packed record[0];
-} __packed;
-
enum {
ND_CMD_IMPLEMENTED = 0,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 683f6f88fcac..871afa4871bf 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1,10 +1,11 @@
#ifndef _LINUX_NF_TABLES_H
#define _LINUX_NF_TABLES_H
-#define NFT_TABLE_MAXNAMELEN 32
-#define NFT_CHAIN_MAXNAMELEN 32
-#define NFT_SET_MAXNAMELEN 32
-#define NFT_OBJ_MAXNAMELEN 32
+#define NFT_NAME_MAXLEN 256
+#define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN
+#define NFT_CHAIN_MAXNAMELEN NFT_NAME_MAXLEN
+#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
+#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_USERDATA_MAXLEN 256
/**
@@ -731,7 +732,8 @@ enum nft_exthdr_op {
* @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
* @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
* @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32)
- * @NFTA_EXTHDR_OP: option match type (NLA_U8)
+ * @NFTA_EXTHDR_OP: option match type (NLA_U32)
+ * @NFTA_EXTHDR_SREG: source register (NLA_U32)
*/
enum nft_exthdr_attributes {
NFTA_EXTHDR_UNSPEC,
@@ -741,6 +743,7 @@ enum nft_exthdr_attributes {
NFTA_EXTHDR_LEN,
NFTA_EXTHDR_FLAGS,
NFTA_EXTHDR_OP,
+ NFTA_EXTHDR_SREG,
__NFTA_EXTHDR_MAX
};
#define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1)
@@ -808,11 +811,13 @@ enum nft_meta_keys {
* @NFT_RT_CLASSID: realm value of packet's route (skb->dst->tclassid)
* @NFT_RT_NEXTHOP4: routing nexthop for IPv4
* @NFT_RT_NEXTHOP6: routing nexthop for IPv6
+ * @NFT_RT_TCPMSS: fetch current path tcp mss
*/
enum nft_rt_keys {
NFT_RT_CLASSID,
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
+ NFT_RT_TCPMSS,
};
/**
@@ -1221,6 +1226,8 @@ enum nft_objref_attributes {
enum nft_gen_attributes {
NFTA_GEN_UNSPEC,
NFTA_GEN_ID,
+ NFTA_GEN_PROC_PID,
+ NFTA_GEN_PROC_NAME,
__NFTA_GEN_MAX
};
#define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1)
@@ -1275,7 +1282,8 @@ enum nft_ct_helper_attributes {
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
-#define __NFT_OBJECT_MAX 4
+#define NFT_OBJECT_LIMIT 4
+#define __NFT_OBJECT_MAX 5
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 79da349f1060..aa98573248b1 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -19,12 +19,13 @@
struct xt_hashlimit_htable;
enum {
- XT_HASHLIMIT_HASH_DIP = 1 << 0,
- XT_HASHLIMIT_HASH_DPT = 1 << 1,
- XT_HASHLIMIT_HASH_SIP = 1 << 2,
- XT_HASHLIMIT_HASH_SPT = 1 << 3,
- XT_HASHLIMIT_INVERT = 1 << 4,
- XT_HASHLIMIT_BYTES = 1 << 5,
+ XT_HASHLIMIT_HASH_DIP = 1 << 0,
+ XT_HASHLIMIT_HASH_DPT = 1 << 1,
+ XT_HASHLIMIT_HASH_SIP = 1 << 2,
+ XT_HASHLIMIT_HASH_SPT = 1 << 3,
+ XT_HASHLIMIT_INVERT = 1 << 4,
+ XT_HASHLIMIT_BYTES = 1 << 5,
+ XT_HASHLIMIT_RATE_MATCH = 1 << 6,
};
struct hashlimit_cfg {
@@ -79,6 +80,21 @@ struct hashlimit_cfg2 {
__u8 srcmask, dstmask;
};
+struct hashlimit_cfg3 {
+ __u64 avg; /* Average secs between packets * scale */
+ __u64 burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+
+ /* user specified */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
+
+ __u32 interval;
+ __u8 srcmask, dstmask;
+};
+
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -95,4 +111,12 @@ struct xt_hashlimit_mtinfo2 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
+struct xt_hashlimit_mtinfo3 {
+ char name[NAME_MAX];
+ struct hashlimit_cfg3 cfg;
+
+ /* Used internally by the kernel */
+ struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_HASHLIMIT_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index f86127a46cfc..e8af60a7c56d 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -69,6 +69,9 @@ struct nlmsghdr {
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
+/* Modifiers to DELETE request */
+#define NLM_F_NONREC 0x100 /* Do not delete recursively */
+
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
#define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */
@@ -226,5 +229,22 @@ struct nlattr {
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
+/* Generic 32-bit bitflags attribute content sent to the kernel.
+ *
+ * The value is a bitmap that defines the flag values being set.
+ * The selector is a bitmask that defines which of those flags are valid.
+ *
+ * Examples:
+ * value = 0x0 and selector = 0x1
+ * implies we are selecting the first bit and setting its value to 0.
+ *
+ * value = 0x2 and selector = 0x2
+ * implies we are selecting the second bit and setting its value to 1.
+ *
+ */
+struct nla_bitfield32 {
+ __u32 value;
+ __u32 selector;
+};
#endif /* _UAPI__LINUX_NETLINK_H */
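As a usage illustration rather than part of the patch: filling in the new structure so that only the bits named in the selector are acted on by the kernel, assuming installed headers that carry this definition.

#include <linux/netlink.h>
#include <stdio.h>

int main(void)
{
        /* Select bits 0x1 and 0x2; request bit 0x1 set to 1 and bit 0x2 set
         * to 0. Bits outside the selector are left untouched. */
        struct nla_bitfield32 bf = {
                .value    = 0x1,  /* requested values for the selected bits */
                .selector = 0x3,  /* which bits this request may change */
        };

        printf("value=0x%x selector=0x%x\n", bf.value, bf.selector);
        return 0;
}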
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b1c0b187acfe..140ae638cfd6 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -139,8 +139,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_IDENTIFIER = 1U << 16,
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
+ PERF_SAMPLE_PHYS_ADDR = 1U << 19,
- PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
};
/*
@@ -174,6 +175,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
+ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -198,9 +201,30 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+ PERF_SAMPLE_BRANCH_TYPE_SAVE =
+ 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
+/*
+ * Common flow change classification
+ */
+enum {
+ PERF_BR_UNKNOWN = 0, /* unknown */
+ PERF_BR_COND = 1, /* conditional */
+ PERF_BR_UNCOND = 2, /* unconditional */
+ PERF_BR_IND = 3, /* indirect */
+ PERF_BR_CALL = 4, /* function call */
+ PERF_BR_IND_CALL = 5, /* indirect function call */
+ PERF_BR_RET = 6, /* function return */
+ PERF_BR_SYSCALL = 7, /* syscall */
+ PERF_BR_SYSRET = 8, /* syscall return */
+ PERF_BR_COND_CALL = 9, /* conditional function call */
+ PERF_BR_COND_RET = 10, /* conditional function return */
+ PERF_BR_MAX,
+};
+
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -791,6 +815,7 @@ enum perf_event_type {
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
+ * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -931,14 +956,20 @@ union perf_mem_data_src {
mem_snoop:5, /* snoop mode */
mem_lock:2, /* lock instr */
mem_dtlb:7, /* tlb access */
- mem_rsvd:31;
+ mem_lvl_num:4, /* memory hierarchy level number */
+ mem_remote:1, /* remote */
+ mem_snoopx:2, /* snoop mode, ext */
+ mem_rsvd:24;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:31,
+ __u64 mem_rsvd:24,
+ mem_snoopx:2, /* snoop mode, ext */
+ mem_remote:1, /* remote */
+ mem_lvl_num:4, /* memory hierarchy level number */
mem_dtlb:7, /* tlb access */
mem_lock:2, /* lock instr */
mem_snoop:5, /* snoop mode */
@@ -975,6 +1006,22 @@ union perf_mem_data_src {
#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT 5
+#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */
+#define PERF_MEM_REMOTE_SHIFT 37
+
+#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */
+#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
+#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
+#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
+/* 5-0xa available */
+#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
+#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
+#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
+#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
+#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
+
+#define PERF_MEM_LVLNUM_SHIFT 33
+
/* snoop mode */
#define PERF_MEM_SNOOP_NA 0x01 /* not available */
#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
@@ -983,6 +1030,10 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT 19
+#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
+/* 1 free */
+#define PERF_MEM_SNOOPX_SHIFT 37
+
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
@@ -1015,6 +1066,7 @@ union perf_mem_data_src {
* in_tx: running in a hardware transaction
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
+ * type: branch type
*/
struct perf_branch_entry {
__u64 from;
@@ -1024,7 +1076,8 @@ struct perf_branch_entry {
in_tx:1, /* in transaction */
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
- reserved:44;
+ type:4, /* branch type */
+ reserved:40;
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
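As a usage illustration rather than part of the patch: a sketch that requests the new physical-address sample and saved branch types, assuming headers with these definitions; the call can still fail where the PMU lacks branch-stack support or perf_event_paranoid forbids it, and consuming the ring buffer is not shown.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* perf_event_open(2) has no libc wrapper; call it through syscall(2). */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        long fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.exclude_kernel = 1;
        /* Record the physical address of each sampled access ... */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_PHYS_ADDR |
                           PERF_SAMPLE_BRANCH_STACK;
        /* ... and ask the PMU to classify each recorded branch. */
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
                                  PERF_SAMPLE_BRANCH_TYPE_SAVE;

        fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        printf("perf event opened, fd=%ld\n", fd);
        close(fd);
        return 0;
}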
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index d148505010a7..dab7dad9e01a 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -683,10 +683,29 @@ struct tcamsg {
unsigned char tca__pad1;
unsigned short tca__pad2;
};
+
+enum {
+ TCA_ROOT_UNSPEC,
+ TCA_ROOT_TAB,
+#define TCA_ACT_TAB TCA_ROOT_TAB
+#define TCAA_MAX TCA_ROOT_TAB
+ TCA_ROOT_FLAGS,
+ TCA_ROOT_COUNT,
+ TCA_ROOT_TIME_DELTA, /* in msecs */
+ __TCA_ROOT_MAX,
+#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
+};
+
#define TA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcamsg))))
#define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg))
-#define TCA_ACT_TAB 1 /* attr type must be >=1 */
-#define TCAA_MAX 1
+/* tcamsg flags stored in attribute TCA_ROOT_FLAGS
+ *
+ * TCA_FLAG_LARGE_DUMP_ON: user->kernel request to dump more than TCA_ACT_MAX_PRIO
+ * actions in a single dump. All dump responses will carry the number of actions
+ * being dumped in TCA_ROOT_COUNT for the user application's consumption.
+ *
+ */
+#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
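As a usage illustration rather than part of the patch: the payload such a dump request would carry, on the assumption that TCA_ROOT_FLAGS is encoded with the struct nla_bitfield32 added to netlink.h above; netlink message and attribute framing are omitted.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdio.h>

int main(void)
{
        /* Ask for the large-dump behaviour and declare only that bit valid. */
        struct nla_bitfield32 flags = {
                .value    = TCA_FLAG_LARGE_DUMP_ON,
                .selector = TCA_FLAG_LARGE_DUMP_ON,
        };

        printf("TCA_ROOT_FLAGS payload: value=0x%x selector=0x%x\n",
               flags.value, flags.selector);
        return 0;
}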
diff --git a/include/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 7343f71783dc..9656aad8f8f7 100644
--- a/include/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -1,17 +1,18 @@
-/* AF_RXRPC parameters
+/* Types and definitions for AF_RXRPC.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
+ * modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * 2 of the Licence, or (at your option) any later version.
*/
-#ifndef _LINUX_RXRPC_H
-#define _LINUX_RXRPC_H
+#ifndef _UAPI_LINUX_RXRPC_H
+#define _UAPI_LINUX_RXRPC_H
+#include <linux/types.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -76,4 +77,48 @@ enum rxrpc_cmsg_type {
#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
-#endif /* _LINUX_RXRPC_H */
+/*
+ * RxRPC-level abort codes
+ */
+#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */
+#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */
+#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */
+#define RX_EOF -4 /* unexpected end of data on read op */
+#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */
+#define RX_USER_ABORT -6 /* generic user abort */
+#define RX_ADDRINUSE -7 /* UDP port in use */
+#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */
+
+/*
+ * (un)marshalling abort codes (rxgen)
+ */
+#define RXGEN_CC_MARSHAL -450
+#define RXGEN_CC_UNMARSHAL -451
+#define RXGEN_SS_MARSHAL -452
+#define RXGEN_SS_UNMARSHAL -453
+#define RXGEN_DECODE -454
+#define RXGEN_OPCODE -455
+#define RXGEN_SS_XDRFREE -456
+#define RXGEN_CC_XDRFREE -457
+
+/*
+ * Rx kerberos security abort codes
+ * - unfortunately we have no generalised security abort codes to say things
+ * like "unsupported security", so we have to use these instead and hope the
+ * other side understands
+ */
+#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */
+#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */
+#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */
+#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */
+#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */
+#define RXKADNOAUTH 19270405 /* caller not authorised */
+#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */
+#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */
+#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */
+#define RXKADEXPIRED 19270409 /* authentication expired */
+#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */
+#define RXKADDATALEN 19270411 /* user data too long */
+#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */
+
+#endif /* _UAPI_LINUX_RXRPC_H */
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index b6e5a0a1afd7..b23df9f58354 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -33,16 +33,26 @@ struct seg6_iptunnel_encap {
enum {
SEG6_IPTUN_MODE_INLINE,
SEG6_IPTUN_MODE_ENCAP,
+ SEG6_IPTUN_MODE_L2ENCAP,
};
#ifdef __KERNEL__
static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
{
- int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP);
-
- return ((tuninfo->srh->hdrlen + 1) << 3) +
- (encap * sizeof(struct ipv6hdr));
+ int head = 0;
+
+ switch (tuninfo->mode) {
+ case SEG6_IPTUN_MODE_INLINE:
+ break;
+ case SEG6_IPTUN_MODE_ENCAP:
+ head = sizeof(struct ipv6hdr);
+ break;
+ case SEG6_IPTUN_MODE_L2ENCAP:
+ return 0;
+ }
+
+ return ((tuninfo->srh->hdrlen + 1) << 3) + head;
}
#endif
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
new file mode 100644
index 000000000000..ef2d8c3e76c1
--- /dev/null
+++ b/include/uapi/linux/seg6_local.h
@@ -0,0 +1,68 @@
+/*
+ * SR-IPv6 implementation
+ *
+ * Author:
+ * David Lebrun <david.lebrun@uclouvain.be>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_SEG6_LOCAL_H
+#define _UAPI_LINUX_SEG6_LOCAL_H
+
+#include <linux/seg6.h>
+
+enum {
+ SEG6_LOCAL_UNSPEC,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_SRH,
+ SEG6_LOCAL_TABLE,
+ SEG6_LOCAL_NH4,
+ SEG6_LOCAL_NH6,
+ SEG6_LOCAL_IIF,
+ SEG6_LOCAL_OIF,
+ __SEG6_LOCAL_MAX,
+};
+#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
+
+enum {
+ SEG6_LOCAL_ACTION_UNSPEC = 0,
+ /* node segment */
+ SEG6_LOCAL_ACTION_END = 1,
+ /* adjacency segment (IPv6 cross-connect) */
+ SEG6_LOCAL_ACTION_END_X = 2,
+ /* lookup of next seg NH in table */
+ SEG6_LOCAL_ACTION_END_T = 3,
+ /* decap and L2 cross-connect */
+ SEG6_LOCAL_ACTION_END_DX2 = 4,
+ /* decap and IPv6 cross-connect */
+ SEG6_LOCAL_ACTION_END_DX6 = 5,
+ /* decap and IPv4 cross-connect */
+ SEG6_LOCAL_ACTION_END_DX4 = 6,
+ /* decap and lookup of DA in v6 table */
+ SEG6_LOCAL_ACTION_END_DT6 = 7,
+ /* decap and lookup of DA in v4 table */
+ SEG6_LOCAL_ACTION_END_DT4 = 8,
+ /* binding segment with insertion */
+ SEG6_LOCAL_ACTION_END_B6 = 9,
+ /* binding segment with encapsulation */
+ SEG6_LOCAL_ACTION_END_B6_ENCAP = 10,
+ /* binding segment with MPLS encap */
+ SEG6_LOCAL_ACTION_END_BM = 11,
+ /* lookup last seg in table */
+ SEG6_LOCAL_ACTION_END_S = 12,
+ /* forward to SR-unaware VNF with static proxy */
+ SEG6_LOCAL_ACTION_END_AS = 13,
+ /* forward to SR-unaware VNF with masquerading */
+ SEG6_LOCAL_ACTION_END_AM = 14,
+
+ __SEG6_LOCAL_ACTION_MAX,
+};
+
+#define SEG6_LOCAL_ACTION_MAX (__SEG6_LOCAL_ACTION_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index c34a2a3eeff5..50d71c436323 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -56,8 +56,6 @@
#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
#define PORT_RT2880 29 /* Ralink RT2880 internal UART */
#define PORT_16550A_FSL64 30 /* Freescale 16550 UART with 64 FIFOs */
-#define PORT_DA830 31 /* TI DA8xx/66AK2x */
-#define PORT_MAX_8250 31 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -70,12 +68,17 @@
#define PORT_CLPS711X 33
#define PORT_SA1100 34
#define PORT_UART00 35
+#define PORT_OWL 36
#define PORT_21285 37
/* Sparc type numbers. */
#define PORT_SUNZILOG 38
#define PORT_SUNSAB 39
+/* Intel EG20 */
+#define PORT_PCH_8LINE 44
+#define PORT_PCH_2LINE 45
+
/* DEC */
#define PORT_DZ 46
#define PORT_ZS 47
@@ -205,8 +208,8 @@
/* MAX310X */
#define PORT_MAX310X 94
-/* High Speed UART for Medfield */
-#define PORT_MFD 95
+/* TI DA8xx/66AK2x */
+#define PORT_DA830 95
/* TI OMAP-UART */
#define PORT_OMAP 96
@@ -271,4 +274,7 @@
/* MPS2 UART */
#define PORT_MPS2UART 116
+/* MediaTek BTIF */
+#define PORT_MTK_BTIF 117
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index 1fbf24ea37fd..cf23c873719d 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -3,6 +3,7 @@
#include <linux/ipc.h>
#include <linux/errno.h>
+#include <asm-generic/hugetlb_encode.h>
#ifndef __KERNEL__
#include <unistd.h>
#endif
@@ -40,11 +41,37 @@ struct shmid_ds {
/* Include the definition of shmid64_ds and shminfo64 */
#include <asm/shmbuf.h>
-/* permission flag for shmget */
+/*
+ * shmget() shmflg values.
+ */
+/* The bottom nine bits are the same as open(2) mode flags */
#define SHM_R 0400 /* or S_IRUGO from <linux/stat.h> */
#define SHM_W 0200 /* or S_IWUGO from <linux/stat.h> */
+/* Bits 9 & 10 are IPC_CREAT and IPC_EXCL */
+#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
+#define SHM_NORESERVE 010000 /* don't check for reservations */
+
+/*
+ * Huge page size encoding when SHM_HUGETLB is specified, and a huge page
+ * size other than the default is desired. See hugetlb_encode.h
+ */
+#define SHM_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
+#define SHM_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+
+#define SHM_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
+#define SHM_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
+#define SHM_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
+#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
+#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
+#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
+#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
+#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
-/* mode for attach */
+/*
+ * shmat() shmflg values
+ */
#define SHM_RDONLY 010000 /* read-only access */
#define SHM_RND 020000 /* round attach address to SHMLBA boundary */
#define SHM_REMAP 040000 /* take-over region on attach */
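For illustration only: a SysV segment explicitly backed by 2MB huge pages using the encoding above; the numeric fallbacks mirror shm.h and hugetlb_encode.h and are only needed when the libc headers predate these flags, and the call fails unless 2MB huge pages are configured and the caller is permitted to use SHM_HUGETLB.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif
#ifndef SHM_HUGE_SHIFT
#define SHM_HUGE_SHIFT 26
#endif
#ifndef SHM_HUGE_2MB
#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)     /* log2(2MB) == 21 */
#endif

int main(void)
{
        size_t len = 2UL * 1024 * 1024;
        int id = shmget(IPC_PRIVATE, len,
                        IPC_CREAT | 0600 | SHM_HUGETLB | SHM_HUGE_2MB);

        if (id < 0) {
                perror("shmget(SHM_HUGETLB | SHM_HUGE_2MB)");
                return 1;
        }
        printf("hugetlb-backed SysV shm id=%d\n", id);
        shmctl(id, IPC_RMID, NULL);
        return 0;
}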
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index d85693295798..758f12b58541 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -184,12 +184,7 @@ enum
LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */
LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */
LINUX_MIB_LISTENDROPS, /* ListenDrops */
- LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */
- LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */
- LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */
- LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */
LINUX_MIB_TCPHPHITS, /* TCPHPHits */
- LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */
LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */
LINUX_MIB_TCPHPACKS, /* TCPHPAcks */
LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */
@@ -208,14 +203,12 @@ enum
LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */
LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */
- LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */
LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */
LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */
LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */
LINUX_MIB_TCPLOSSPROBERECOVERY, /* TCPLossProbeRecovery */
LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */
LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */
- LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */
LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */
LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */
LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index a5507c977497..15c25eccab2b 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -231,6 +231,14 @@ enum {
TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */
TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */
+ TCP_NLA_PACING_RATE, /* Pacing rate in bytes per second */
+ TCP_NLA_DELIVERY_RATE, /* Delivery rate in bytes per second */
+ TCP_NLA_SND_CWND, /* Sending congestion window */
+ TCP_NLA_REORDERING, /* Reordering metric */
+ TCP_NLA_MIN_RTT, /* minimum RTT */
+ TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */
+ TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
+
};
/* for TCP_MD5SIG socket option */
@@ -248,4 +256,13 @@ struct tcp_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
};
+/* INET_DIAG_MD5SIG */
+struct tcp_diag_md5sig {
+ __u8 tcpm_family;
+ __u8 tcpm_prefixlen;
+ __u16 tcpm_keylen;
+ __be32 tcpm_addr[4];
+ __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
+};
+
#endif /* _UAPI_LINUX_TCP_H */
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be4f0c0..a4680a5bf5dd 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -333,7 +333,7 @@ struct uac_processing_unit_descriptor {
__u8 bDescriptorType;
__u8 bDescriptorSubtype;
__u8 bUnitID;
- __u16 wProcessType;
+ __le16 wProcessType;
__u8 bNrInPins;
__u8 baSourceID[];
} __attribute__ ((packed));
@@ -491,8 +491,8 @@ struct uac_format_type_ii_ext_descriptor {
__u8 bDescriptorType;
__u8 bDescriptorSubtype;
__u8 bFormatType;
- __u16 wMaxBitRate;
- __u16 wSamplesPerFrame;
+ __le16 wMaxBitRate;
+ __le16 wSamplesPerFrame;
__u8 bHeaderLength;
__u8 bSideBandProtocol;
} __attribute__((packed));
diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
new file mode 100644
index 000000000000..5f72af35b3ed
--- /dev/null
+++ b/include/uapi/linux/usb/charger.h
@@ -0,0 +1,31 @@
+/*
+ * This file defines the USB charger type and state that are needed for
+ * USB device APIs.
+ */
+
+#ifndef _UAPI__LINUX_USB_CHARGER_H
+#define _UAPI__LINUX_USB_CHARGER_H
+
+/*
+ * USB charger type:
+ * SDP (Standard Downstream Port)
+ * DCP (Dedicated Charging Port)
+ * CDP (Charging Downstream Port)
+ * ACA (Accessory Charger Adapters)
+ */
+enum usb_charger_type {
+ UNKNOWN_TYPE,
+ SDP_TYPE,
+ DCP_TYPE,
+ CDP_TYPE,
+ ACA_TYPE,
+};
+
+/* USB charger state */
+enum usb_charger_state {
+ USB_CHARGER_DEFAULT,
+ USB_CHARGER_PRESENT,
+ USB_CHARGER_ABSENT,
+};
+
+#endif /* _UAPI__LINUX_USB_CHARGER_H */
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 3b059530dac9..d6d1f65cb3c3 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -23,7 +23,9 @@
UFFD_FEATURE_EVENT_REMOVE | \
UFFD_FEATURE_EVENT_UNMAP | \
UFFD_FEATURE_MISSING_HUGETLBFS | \
- UFFD_FEATURE_MISSING_SHMEM)
+ UFFD_FEATURE_MISSING_SHMEM | \
+ UFFD_FEATURE_SIGBUS | \
+ UFFD_FEATURE_THREAD_ID)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -78,6 +80,9 @@ struct uffd_msg {
struct {
__u64 flags;
__u64 address;
+ union {
+ __u32 ptid;
+ } feat;
} pagefault;
struct {
@@ -153,6 +158,13 @@ struct uffdio_api {
* UFFD_FEATURE_MISSING_SHMEM works the same as
* UFFD_FEATURE_MISSING_HUGETLBFS, but it applies to shmem
* (i.e. tmpfs and other shmem based APIs).
+ *
+ * UFFD_FEATURE_SIGBUS means that no page-fault
+ * (UFFD_EVENT_PAGEFAULT) event will be delivered; instead
+ * a SIGBUS signal will be sent to the faulting process.
+ *
+ * UFFD_FEATURE_THREAD_ID means the pid of the faulting task_struct
+ * will be returned; 0 is returned if the feature is not requested.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -161,6 +173,8 @@ struct uffdio_api {
#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
+#define UFFD_FEATURE_SIGBUS (1<<7)
+#define UFFD_FEATURE_THREAD_ID (1<<8)
__u64 features;
__u64 ioctls;
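As a usage illustration rather than part of the patch: a sketch of the UFFDIO_API handshake requesting the two new features; on kernels without them the ioctl fails with EINVAL, and registering memory ranges and handling faults is not shown.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* userfaultfd(2) has no libc wrapper assumed here; use syscall(2). */
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        struct uffdio_api api;

        if (uffd < 0) {
                perror("userfaultfd");
                return 1;
        }
        memset(&api, 0, sizeof(api));
        api.api = UFFD_API;
        /* Ask for SIGBUS delivery on missing pages and for the faulting
         * thread id in pagefault messages. */
        api.features = UFFD_FEATURE_SIGBUS | UFFD_FEATURE_THREAD_ID;

        if (ioctl(uffd, UFFDIO_API, &api) < 0) {
                perror("UFFDIO_API");
                close(uffd);
                return 1;
        }
        printf("handshake done, kernel reports features 0x%llx\n",
               (unsigned long long)api.features);
        close(uffd);
        return 0;
}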
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index c07295969b7e..6d5d5faa989b 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -1,7 +1,7 @@
#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
-/* An interface for efficient virtio implementation, currently for use by KVM
- * and lguest, but hopefully others soon. Do NOT change this since it will
+/* An interface for efficient virtio implementation, currently for use by KVM,
+ * but hopefully others soon. Do NOT change this since it will
* break existing servers and clients.
*
* This header is BSD licensed so anyone can use the definitions to implement
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 2b384ff09fa0..5fe7370a2bef 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -304,6 +304,7 @@ enum xfrm_attr_type_t {
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OUTPUT_MARK, /* __u32 */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
new file mode 100644
index 000000000000..842792eae383
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_VERBS_H
+#define IB_USER_IOCTL_VERBS_H
+
+#include <rdma/rdma_user_ioctl.h>
+
+#define UVERBS_UDATA_DRIVER_DATA_NS 1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
+
+enum uverbs_default_objects {
+ UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+ UVERBS_OBJECT_PD,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_OBJECT_CQ,
+ UVERBS_OBJECT_QP,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_OBJECT_AH,
+ UVERBS_OBJECT_MR,
+ UVERBS_OBJECT_MW,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_OBJECT_WQ,
+ UVERBS_OBJECT_LAST,
+};
+
+enum {
+ UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+ UVERBS_UHW_OUT,
+};
+
+enum uverbs_create_cq_cmd_attr_ids {
+ CREATE_CQ_HANDLE,
+ CREATE_CQ_CQE,
+ CREATE_CQ_USER_HANDLE,
+ CREATE_CQ_COMP_CHANNEL,
+ CREATE_CQ_COMP_VECTOR,
+ CREATE_CQ_FLAGS,
+ CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_destroy_cq_cmd_attr_ids {
+ DESTROY_CQ_HANDLE,
+ DESTROY_CQ_RESP,
+};
+
+enum uverbs_actions_cq_ops {
+ UVERBS_CQ_CREATE,
+ UVERBS_CQ_DESTROY,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 270c350bedc6..9a0b6479fe0c 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -236,6 +236,20 @@ struct ib_uverbs_rss_caps {
__u32 reserved;
};
+struct ib_uverbs_tm_caps {
+ /* Max size of rendezvous request message */
+ __u32 max_rndv_hdr_size;
+ /* Max number of entries in tag matching list */
+ __u32 max_num_tags;
+ /* TM flags */
+ __u32 flags;
+ /* Max number of outstanding list operations */
+ __u32 max_ops;
+ /* Max number of SGE in tag matching entry */
+ __u32 max_sge;
+ __u32 reserved;
+};
+
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
@@ -247,6 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
+ struct ib_uverbs_tm_caps xrq_caps;
};
struct ib_uverbs_query_port {
@@ -578,7 +593,7 @@ struct ib_uverbs_ex_create_qp {
__u32 comp_mask;
__u32 create_flags;
__u32 rwq_ind_tbl_handle;
- __u32 reserved1;
+ __u32 source_qpn;
};
struct ib_uverbs_open_qp {
@@ -1024,7 +1039,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __u32 reserved;
+ __u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
__u64 driver_data[0];
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index af431752655c..c55f60e05f86 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -95,13 +95,63 @@ struct mlx4_ib_create_srq_resp {
__u32 reserved;
};
+struct mlx4_ib_create_qp_rss {
+ __u64 rx_hash_fields_mask;
+ __u8 rx_hash_function;
+ __u8 reserved[7];
+ __u8 rx_hash_key[40];
+ __u32 comp_mask;
+ __u32 reserved1;
+};
+
struct mlx4_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
- __u8 reserved[5];
+ __u8 reserved;
+ __u32 inl_recv_sz;
+};
+
+struct mlx4_ib_create_wq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u8 log_range_size;
+ __u8 reserved[3];
+ __u32 comp_mask;
+};
+
+struct mlx4_ib_modify_wq {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct mlx4_ib_create_rwq_ind_tbl_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+/* RX Hash function flags */
+enum mlx4_ib_rx_hash_function_flags {
+ MLX4_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+/*
+ * RX Hash flags. These flags select which fields of an incoming packet
+ * participate in the RX hash. When a flag is set, the packet field it
+ * represents is included in the RX hash calculation.
+ */
+enum mlx4_ib_rx_hash_fields {
+ MLX4_IB_RX_HASH_SRC_IPV4 = 1 << 0,
+ MLX4_IB_RX_HASH_DST_IPV4 = 1 << 1,
+ MLX4_IB_RX_HASH_SRC_IPV6 = 1 << 2,
+ MLX4_IB_RX_HASH_DST_IPV6 = 1 << 3,
+ MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
+ MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
+ MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
};
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 0b3d30837a9f..1791bf123ba9 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -168,6 +168,28 @@ struct mlx5_packet_pacing_caps {
__u32 reserved;
};
+enum mlx5_ib_mpw_caps {
+ MPW_RESERVED = 1 << 0,
+ MLX5_IB_ALLOW_MPW = 1 << 1,
+ MLX5_IB_SUPPORT_EMPW = 1 << 2,
+};
+
+enum mlx5_ib_sw_parsing_offloads {
+ MLX5_IB_SW_PARSING = 1 << 0,
+ MLX5_IB_SW_PARSING_CSUM = 1 << 1,
+ MLX5_IB_SW_PARSING_LSO = 1 << 2,
+};
+
+struct mlx5_ib_sw_parsing_caps {
+ __u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
+
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+ */
+ __u32 supported_qpts;
+};
+
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
@@ -177,6 +199,7 @@ struct mlx5_ib_query_device_resp {
struct mlx5_packet_pacing_caps packet_pacing_caps;
__u32 mlx5_ib_support_multi_pkt_send_wqes;
__u32 reserved;
+ struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
};
struct mlx5_ib_create_cq {
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 75c270d839c8..54b64357ab24 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr;
__u32 sges_per_srq_wr;
__u32 max_cqes;
+ __u8 dpm_enabled;
+ __u8 wids_enabled;
+ __u16 wid_count;
};
struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 02fe8390c18f..861440a87e7c 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -8,7 +8,7 @@ enum {
RDMA_NL_IWCM,
RDMA_NL_RSVD,
RDMA_NL_LS, /* RDMA Local Services */
- RDMA_NL_I40IW,
+ RDMA_NL_NLDEV, /* RDMA device interface */
RDMA_NL_NUM_CLIENTS
};
@@ -222,4 +222,86 @@ struct rdma_nla_ls_gid {
__u8 gid[16];
};
+enum rdma_nldev_command {
+ RDMA_NLDEV_CMD_UNSPEC,
+
+ RDMA_NLDEV_CMD_GET, /* can dump */
+ RDMA_NLDEV_CMD_SET,
+ RDMA_NLDEV_CMD_NEW,
+ RDMA_NLDEV_CMD_DEL,
+
+ RDMA_NLDEV_CMD_PORT_GET, /* can dump */
+ RDMA_NLDEV_CMD_PORT_SET,
+ RDMA_NLDEV_CMD_PORT_NEW,
+ RDMA_NLDEV_CMD_PORT_DEL,
+
+ RDMA_NLDEV_NUM_OPS
+};
+
+enum rdma_nldev_attr {
+ /* don't change the order or add anything between, this is ABI! */
+ RDMA_NLDEV_ATTR_UNSPEC,
+
+ /* Identifier for ib_device */
+ RDMA_NLDEV_ATTR_DEV_INDEX, /* u32 */
+
+ RDMA_NLDEV_ATTR_DEV_NAME, /* string */
+ /*
+ * Device index together with port index are identifiers
+ * for port/link properties.
+ *
+ * For the RDMA_NLDEV_CMD_GET command, the port index returns the number
+ * of available ports in the ib_device, while for port-specific operations
+ * it is the real port index as it appears in sysfs. Port indexes follow
+ * sysfs notation and start from 1 for the first port.
+ */
+ RDMA_NLDEV_ATTR_PORT_INDEX, /* u32 */
+
+ /*
+ * Device and port capabilities
+ */
+ RDMA_NLDEV_ATTR_CAP_FLAGS, /* u64 */
+
+ /*
+ * FW version
+ */
+ RDMA_NLDEV_ATTR_FW_VERSION, /* string */
+
+ /*
+ * Node GUID (in host byte order) associated with the RDMA device.
+ */
+ RDMA_NLDEV_ATTR_NODE_GUID, /* u64 */
+
+ /*
+ * System image GUID (in host byte order) associated with
+ * this RDMA device and other devices which are part of a
+ * single system.
+ */
+ RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, /* u64 */
+
+ /*
+ * Subnet prefix (in host byte order)
+ */
+ RDMA_NLDEV_ATTR_SUBNET_PREFIX, /* u64 */
+
+ /*
+ * Local Identifier (LID).
+ * According to the IB specification, it is a 16-bit address assigned
+ * by the Subnet Manager. Extended to 32 bits for OmniPath users.
+ */
+ RDMA_NLDEV_ATTR_LID, /* u32 */
+ RDMA_NLDEV_ATTR_SM_LID, /* u32 */
+
+ /*
+ * LID mask control (LMC)
+ */
+ RDMA_NLDEV_ATTR_LMC, /* u8 */
+
+ RDMA_NLDEV_ATTR_PORT_STATE, /* u8 */
+ RDMA_NLDEV_ATTR_PORT_PHYS_STATE, /* u8 */
+
+ RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+
+ RDMA_NLDEV_ATTR_MAX
+};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 9388125ad51b..165a27e969d5 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -43,6 +43,39 @@
/* Legacy name, for user space application which already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * This bit is kernel output: for mandatory fields it should be
+ * ignored and considered set.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ __u16 reserved;
+ __u64 data; /* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __u64 reserved;
+ struct ib_uverbs_attr attrs[0];
+};
+
/*
* General blocks assignments
* It is closed on purpose - do not expose it to user space
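As a layout illustration rather than part of the patch: assembling the new ioctl header with a single attribute, assuming these headers are installed; the object, method and attribute ids reuse the CQ enums from ib_user_ioctl_verbs.h above, the handle value is a placeholder, and actually issuing RDMA_VERBS_IOCTL against an open uverbs device is not shown.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <rdma/rdma_user_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>

int main(void)
{
        struct {
                struct ib_uverbs_ioctl_hdr hdr;
                struct ib_uverbs_attr attrs[1];
        } req;

        memset(&req, 0, sizeof(req));
        req.hdr.length = sizeof(req);
        req.hdr.object_id = UVERBS_OBJECT_CQ;
        req.hdr.method_id = UVERBS_CQ_DESTROY;
        req.hdr.num_attrs = 1;

        req.attrs[0].attr_id = DESTROY_CQ_HANDLE;
        req.attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
        req.attrs[0].len = sizeof(uint32_t);
        req.attrs[0].data = 42;         /* placeholder handle, carried inline */

        printf("ioctl request: %u bytes, %u attribute(s), cmd=0x%lx\n",
               (unsigned)req.hdr.length, (unsigned)req.hdr.num_attrs,
               (unsigned long)RDMA_VERBS_IOCTL);
        return 0;
}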
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index c8c1d2d6df4d..c6569b0032ec 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -125,7 +125,8 @@ enum pvrdma_wc_flags {
PVRDMA_WC_IP_CSUM_OK = 1 << 3,
PVRDMA_WC_WITH_SMAC = 1 << 4,
PVRDMA_WC_WITH_VLAN = 1 << 5,
- PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_VLAN,
+ PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
+ PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
struct pvrdma_alloc_ucontext_resp {
@@ -283,7 +284,8 @@ struct pvrdma_cqe {
__u8 dlid_path_bits;
__u8 port_num;
__u8 smac[6];
- __u8 reserved2[7]; /* Pad to next power of 2 (64). */
+ __u8 network_hdr_type;
+ __u8 reserved2[6]; /* Pad to next power of 2 (64). */
};
#endif /* __VMW_PVRDMA_ABI_H__ */
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index d1767dfb0d95..8906361bb50c 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -35,3 +35,11 @@ static inline int register_xen_selfballooning(struct device *dev)
return -ENOSYS;
}
#endif
+
+#ifdef CONFIG_XEN_BALLOON
+void xen_balloon_init(void);
+#else
+static inline void xen_balloon_init(void)
+{
+}
+#endif