Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h12
-rw-r--r--include/acpi/acpi_drivers.h1
-rw-r--r--include/acpi/acpiosxf.h10
-rw-r--r--include/acpi/acpixf.h25
-rw-r--r--include/acpi/acrestyp.h1
-rw-r--r--include/acpi/actbl.h4
-rw-r--r--include/acpi/actbl1.h74
-rw-r--r--include/acpi/actbl2.h39
-rw-r--r--include/acpi/actbl3.h66
-rw-r--r--include/acpi/actypes.h48
-rw-r--r--include/acpi/platform/acenv.h44
-rw-r--r--include/acpi/platform/acmsvcex.h54
-rw-r--r--include/acpi/platform/acwinex.h49
-rw-r--r--include/acpi/video.h22
-rw-r--r--include/asm-generic/io.h4
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--include/asm-generic/preempt.h4
-rw-r--r--include/asm-generic/qspinlock.h27
-rw-r--r--include/asm-generic/rwsem.h13
-rw-r--r--include/asm-generic/seccomp.h14
-rw-r--r--include/asm-generic/siginfo.h15
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/clocksource/arm_arch_timer.h12
-rw-r--r--include/crypto/aead.h3
-rw-r--r--include/crypto/hash.h3
-rw-r--r--include/crypto/pkcs7.h6
-rw-r--r--include/crypto/public_key.h33
-rw-r--r--include/crypto/skcipher.h3
-rw-r--r--include/drm/bridge/analogix_dp.h41
-rw-r--r--include/drm/drmP.h26
-rw-r--r--include/drm/drm_agpsupport.h4
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_atomic_helper.h15
-rw-r--r--include/drm/drm_crtc.h116
-rw-r--r--include/drm/drm_displayid.h17
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h92
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_edid.h8
-rw-r--r--include/drm/drm_fb_cma_helper.h19
-rw-r--r--include/drm/drm_fb_helper.h15
-rw-r--r--include/drm/drm_gem.h52
-rw-r--r--include/drm/drm_legacy.h4
-rw-r--r--include/drm/drm_mem_util.h19
-rw-r--r--include/drm/drm_modeset_helper_vtables.h2
-rw-r--r--include/drm/drm_panel.h59
-rw-r--r--include/drm/drm_vma_manager.h15
-rw-r--r--include/drm/ttm/ttm_bo_api.h2
-rw-r--r--include/drm/ttm/ttm_bo_driver.h34
-rw-r--r--include/dt-bindings/clock/ath79-clk.h19
-rw-r--r--include/dt-bindings/clock/axis,artpec6-clkctrl.h38
-rw-r--r--include/dt-bindings/clock/bcm2835.h20
-rw-r--r--include/dt-bindings/clock/exynos3250.h11
-rw-r--r--include/dt-bindings/clock/exynos5420.h24
-rw-r--r--include/dt-bindings/clock/hi3519-clock.h40
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h3
-rw-r--r--include/dt-bindings/clock/microchip,pic32-clock.h42
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h3
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h5
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h755
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/dt-bindings/clock/vf610-clock.h8
-rw-r--r--include/dt-bindings/gpio/meson-gxbb-gpio.h154
-rw-r--r--include/dt-bindings/gpio/tegra-gpio.h68
-rw-r--r--include/dt-bindings/gpio/tegra186-gpio.h56
-rw-r--r--include/dt-bindings/iio/adi,ad5592r.h16
-rw-r--r--include/dt-bindings/mfd/arizona.h5
-rw-r--r--include/dt-bindings/mfd/max77620.h39
-rw-r--r--include/dt-bindings/pinctrl/hisi.h59
-rw-r--r--include/dt-bindings/power/r8a7779-sysc.h27
-rw-r--r--include/dt-bindings/power/r8a7790-sysc.h34
-rw-r--r--include/dt-bindings/power/r8a7791-sysc.h26
-rw-r--r--include/dt-bindings/power/r8a7793-sysc.h28
-rw-r--r--include/dt-bindings/power/r8a7794-sysc.h26
-rw-r--r--include/dt-bindings/power/r8a7795-sysc.h42
-rw-r--r--include/dt-bindings/power/rk3399-power.h53
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h1
-rw-r--r--include/keys/asymmetric-subtype.h2
-rw-r--r--include/keys/asymmetric-type.h13
-rw-r--r--include/keys/system_keyring.h41
-rw-r--r--include/kvm/arm_arch_timer.h11
-rw-r--r--include/kvm/arm_vgic.h27
-rw-r--r--include/kvm/vgic/vgic.h246
-rw-r--r--include/linux/acpi.h13
-rw-r--r--include/linux/amba/pl08x.h2
-rw-r--r--include/linux/apple-gmux.h2
-rw-r--r--include/linux/ata.h75
-rw-r--r--include/linux/ath9k_platform.h4
-rw-r--r--include/linux/atomic.h4
-rw-r--r--include/linux/audit.h24
-rw-r--r--include/linux/backlight.h3
-rw-r--r--include/linux/bcm47xx_sprom.h24
-rw-r--r--include/linux/bcma/bcma.h1
-rw-r--r--include/linux/bcma/bcma_driver_arm_c9.h15
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/bio.h11
-rw-r--r--include/linux/bitops.h16
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h28
-rw-r--r--include/linux/blktrace_api.h9
-rw-r--r--include/linux/bootmem.h16
-rw-r--r--include/linux/bpf.h12
-rw-r--r--include/linux/can/dev.h22
-rw-r--r--include/linux/ccp.h36
-rw-r--r--include/linux/ceph/auth.h10
-rw-r--r--include/linux/ceph/ceph_frag.h4
-rw-r--r--include/linux/ceph/ceph_fs.h20
-rw-r--r--include/linux/ceph/decode.h2
-rw-r--r--include/linux/ceph/libceph.h57
-rw-r--r--include/linux/ceph/mon_client.h23
-rw-r--r--include/linux/ceph/osd_client.h237
-rw-r--r--include/linux/ceph/osdmap.h163
-rw-r--r--include/linux/ceph/rados.h34
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/clk-provider.h103
-rw-r--r--include/linux/clk/renesas.h16
-rw-r--r--include/linux/clk/tegra.h5
-rw-r--r--include/linux/clk/ti.h2
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/clocksource.h1
-rw-r--r--include/linux/compaction.h151
-rw-r--r--include/linux/compiler-gcc.h3
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/coresight-stm.h6
-rw-r--r--include/linux/cpu.h18
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h54
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpumask.h6
-rw-r--r--include/linux/cpuset.h48
-rw-r--r--include/linux/crash_dump.h8
-rw-r--r--include/linux/crypto.h3
-rw-r--r--include/linux/dax.h43
-rw-r--r--include/linux/dcache.h65
-rw-r--r--include/linux/debugfs.h49
-rw-r--r--include/linux/debugobjects.h17
-rw-r--r--include/linux/devcoredump.h86
-rw-r--r--include/linux/devfreq.h99
-rw-r--r--include/linux/device.h24
-rw-r--r--include/linux/devpts_fs.h15
-rw-r--r--include/linux/dma-buf.h13
-rw-r--r--include/linux/dma-iommu.h4
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/dma/dw.h5
-rw-r--r--include/linux/dma/xilinx_dma.h14
-rw-r--r--include/linux/dmaengine.h20
-rw-r--r--include/linux/efi.h181
-rw-r--r--include/linux/err.h2
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/ethtool.h7
-rw-r--r--include/linux/export.h33
-rw-r--r--include/linux/f2fs_fs.h2
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fence.h2
-rw-r--r--include/linux/file.h13
-rw-r--r--include/linux/filter.h72
-rw-r--r--include/linux/fs.h139
-rw-r--r--include/linux/fscache-cache.h2
-rw-r--r--include/linux/fscrypto.h1
-rw-r--r--include/linux/fsl_ifc.h45
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/ftrace.h1
-rw-r--r--include/linux/genhd.h23
-rw-r--r--include/linux/genl_magic_struct.h7
-rw-r--r--include/linux/gpio/driver.h25
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/hash.h110
-rw-r--r--include/linux/huge_mm.h9
-rw-r--r--include/linux/hugetlb.h11
-rw-r--r--include/linux/hugetlb_cgroup.h4
-rw-r--r--include/linux/hugetlb_inline.h6
-rw-r--r--include/linux/hyperv.h170
-rw-r--r--include/linux/i2c-mux.h55
-rw-r--r--include/linux/i2c.h50
-rw-r--r--include/linux/i2c/sx150x.h82
-rw-r--r--include/linux/ieee80211.h24
-rw-r--r--include/linux/ieee802154.h45
-rw-r--r--include/linux/if_ether.h5
-rw-r--r--include/linux/iio/buffer.h2
-rw-r--r--include/linux/iio/common/st_sensors.h9
-rw-r--r--include/linux/iio/consumer.h53
-rw-r--r--include/linux/iio/iio.h33
-rw-r--r--include/linux/iio/imu/adis.h1
-rw-r--r--include/linux/iio/magnetometer/ak8975.h16
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h25
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h25
-rw-r--r--include/linux/iommu.h6
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/iova.h23
-rw-r--r--include/linux/ipv6.h20
-rw-r--r--include/linux/irq.h4
-rw-r--r--include/linux/irqbypass.h4
-rw-r--r--include/linux/irqchip/arm-gic-common.h34
-rw-r--r--include/linux/irqchip/arm-gic-v3.h14
-rw-r--r--include/linux/irqchip/arm-gic.h2
-rw-r--r--include/linux/irqchip/irq-partition-percpu.h59
-rw-r--r--include/linux/irqchip/mips-gic.h17
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/irqdomain.h20
-rw-r--r--include/linux/isa.h32
-rw-r--r--include/linux/iscsi_boot_sysfs.h13
-rw-r--r--include/linux/jbd2.h16
-rw-r--r--include/linux/kasan-checks.h12
-rw-r--r--include/linux/kasan.h13
-rw-r--r--include/linux/kernel.h11
-rw-r--r--include/linux/kernfs.h3
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/key-type.h1
-rw-r--r--include/linux/key.h44
-rw-r--r--include/linux/kvm_host.h46
-rw-r--r--include/linux/leds.h8
-rw-r--r--include/linux/libata.h35
-rw-r--r--include/linux/libnvdimm.h7
-rw-r--r--include/linux/lightnvm.h48
-rw-r--r--include/linux/livepatch.h26
-rw-r--r--include/linux/lockdep.h46
-rw-r--r--include/linux/lsm_hooks.h39
-rw-r--r--include/linux/mcb.h14
-rw-r--r--include/linux/mdio.h11
-rw-r--r--include/linux/memcontrol.h31
-rw-r--r--include/linux/memory_hotplug.h8
-rw-r--r--include/linux/mempolicy.h16
-rw-r--r--include/linux/mempool.h3
-rw-r--r--include/linux/mfd/as3722.h1
-rw-r--r--include/linux/mfd/axp20x.h59
-rw-r--r--include/linux/mfd/core.h8
-rw-r--r--include/linux/mfd/cros_ec.h6
-rw-r--r--include/linux/mfd/hi655x-pmic.h55
-rw-r--r--include/linux/mfd/max77620.h346
-rw-r--r--include/linux/mfd/samsung/core.h3
-rw-r--r--include/linux/mfd/samsung/s2mps11.h2
-rw-r--r--include/linux/mfd/syscon.h1
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h3
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h7
-rw-r--r--include/linux/mfd/tmio.h4
-rw-r--r--include/linux/mfd/twl6040.h1
-rw-r--r--include/linux/mfd/wm8400-private.h1
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/cq.h5
-rw-r--r--include/linux/mlx5/device.h115
-rw-r--r--include/linux/mlx5/driver.h45
-rw-r--r--include/linux/mlx5/fs.h23
-rw-r--r--include/linux/mlx5/mlx5_ifc.h354
-rw-r--r--include/linux/mlx5/port.h31
-rw-r--r--include/linux/mlx5/qp.h6
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h68
-rw-r--r--include/linux/mm_inline.h24
-rw-r--r--include/linux/mm_types.h18
-rw-r--r--include/linux/mmc/dw_mmc.h12
-rw-r--r--include/linux/mmc/host.h35
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h10
-rw-r--r--include/linux/mmc/tmio.h71
-rw-r--r--include/linux/mmu_context.h7
-rw-r--r--include/linux/mmzone.h51
-rw-r--r--include/linux/module.h25
-rw-r--r--include/linux/mtd/fsmc.h18
-rw-r--r--include/linux/mtd/map.h19
-rw-r--r--include/linux/mtd/mtd.h75
-rw-r--r--include/linux/mtd/nand.h28
-rw-r--r--include/linux/mtd/onenand.h2
-rw-r--r--include/linux/mtd/sharpsl.h2
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/namei.h4
-rw-r--r--include/linux/nd.h11
-rw-r--r--include/linux/net.h13
-rw-r--r--include/linux/netdev_features.h29
-rw-r--r--include/linux/netdevice.h90
-rw-r--r--include/linux/netfilter/ipset/ip_set.h9
-rw-r--r--include/linux/netfilter/x_tables.h18
-rw-r--r--include/linux/nfs4.h28
-rw-r--r--include/linux/nfs_fs.h16
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h34
-rw-r--r--include/linux/nilfs2_fs.h79
-rw-r--r--include/linux/nl802154.h2
-rw-r--r--include/linux/nodemask.h11
-rw-r--r--include/linux/nvme.h4
-rw-r--r--include/linux/nvmem-provider.h10
-rw-r--r--include/linux/of.h67
-rw-r--r--include/linux/of_address.h9
-rw-r--r--include/linux/of_fdt.h5
-rw-r--r--include/linux/of_graph.h1
-rw-r--r--include/linux/of_iommu.h8
-rw-r--r--include/linux/of_mtd.h50
-rw-r--r--include/linux/omap-gpmc.h172
-rw-r--r--include/linux/omap-mailbox.h2
-rw-r--r--include/linux/oom.h32
-rw-r--r--include/linux/padata.h5
-rw-r--r--include/linux/page-flags.h47
-rw-r--r--include/linux/page_idle.h43
-rw-r--r--include/linux/page_ref.h26
-rw-r--r--include/linux/pagemap.h32
-rw-r--r--include/linux/pci.h18
-rw-r--r--include/linux/pci_ids.h18
-rw-r--r--include/linux/pcieport_if.h2
-rw-r--r--include/linux/percpu.h3
-rw-r--r--include/linux/perf/arm_pmu.h2
-rw-r--r--include/linux/perf_event.h175
-rw-r--r--include/linux/phy.h8
-rw-r--r--include/linux/phy/phy.h31
-rw-r--r--include/linux/phy/tegra/xusb.h30
-rw-r--r--include/linux/pinctrl/pinctrl.h6
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/platform_data/dma-dw.h12
-rw-r--r--include/linux/platform_data/gpio-dwapb.h3
-rw-r--r--include/linux/platform_data/gpmc-omap.h172
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h5
-rw-r--r--include/linux/platform_data/mailbox-omap.h58
-rw-r--r--include/linux/platform_data/media/ir-rx51.h1
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h12
-rw-r--r--include/linux/platform_data/pwm_omap_dmtimer.h21
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_device.h6
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/pm_opp.h62
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/poll.h11
-rw-r--r--include/linux/posix_acl.h1
-rw-r--r--include/linux/printk.h14
-rw-r--r--include/linux/property.h15
-rw-r--r--include/linux/proportions.h137
-rw-r--r--include/linux/psci.h2
-rw-r--r--include/linux/pwm.h338
-rw-r--r--include/linux/qed/common_hsi.h62
-rw-r--r--include/linux/qed/qed_eth_if.h22
-rw-r--r--include/linux/qed/qed_if.h117
-rw-r--r--include/linux/qed/qed_iov_if.h34
-rw-r--r--include/linux/radix-tree.h180
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/rculist.h36
-rw-r--r--include/linux/rcupdate.h30
-rw-r--r--include/linux/rcutiny.h16
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regulator/act8865.h2
-rw-r--r--include/linux/regulator/driver.h7
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/max8973-regulator.h5
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/reservation.h71
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/reset.h194
-rw-r--r--include/linux/rhashtable.h3
-rw-r--r--include/linux/rpmsg.h18
-rw-r--r--include/linux/rwsem-spinlock.h2
-rw-r--r--include/linux/rwsem.h5
-rw-r--r--include/linux/scatterlist.h25
-rw-r--r--include/linux/sched.h186
-rw-r--r--include/linux/sctp.h69
-rw-r--r--include/linux/security.h78
-rw-r--r--include/linux/selection.h8
-rw-r--r--include/linux/seqlock.h4
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/signal.h35
-rw-r--r--include/linux/skbuff.h56
-rw-r--r--include/linux/slab.h16
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h16
-rw-r--r--include/linux/soc/qcom/smd.h61
-rw-r--r--include/linux/soc/qcom/smem_state.h35
-rw-r--r--include/linux/soc/renesas/rcar-sysc.h16
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/stm.h3
-rw-r--r--include/linux/stmmac.h2
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/string_helpers.h6
-rw-r--r--include/linux/stringhash.h76
-rw-r--r--include/linux/sunrpc/auth.h26
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/msg_prot.h4
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/linux/sunrpc/svcauth.h40
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/sync_file.h57
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/thermal.h1
-rw-r--r--include/linux/time64.h17
-rw-r--r--include/linux/timekeeping.h20
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/trace_events.h134
-rw-r--r--include/linux/tty.h93
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/types.h1
-rw-r--r--include/linux/u64_stats_sync.h14
-rw-r--r--include/linux/udp.h16
-rw-r--r--include/linux/uio.h1
-rw-r--r--include/linux/usb.h11
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h1
-rw-r--r--include/linux/usb/otg-fsm.h91
-rw-r--r--include/linux/uuid.h21
-rw-r--r--include/linux/verification.h49
-rw-r--r--include/linux/verify_pefile.h22
-rw-r--r--include/linux/vmalloc.h7
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/linux/xattr.h12
-rw-r--r--include/linux/zsmalloc.h4
-rw-r--r--include/media/media-device.h13
-rw-r--r--include/media/media-entity.h81
-rw-r--r--include/media/rc-core.h18
-rw-r--r--include/media/v4l2-dev.h3
-rw-r--r--include/media/v4l2-device.h55
-rw-r--r--include/media/v4l2-rect.h173
-rw-r--r--include/media/v4l2-subdev.h8
-rw-r--r--include/media/v4l2-tpg-colors.h68
-rw-r--r--include/media/v4l2-tpg.h597
-rw-r--r--include/media/videobuf2-core.h8
-rw-r--r--include/media/vsp1.h23
-rw-r--r--include/misc/cxl.h8
-rw-r--r--include/net/6lowpan.h37
-rw-r--r--include/net/act_api.h14
-rw-r--r--include/net/af_rxrpc.h4
-rw-r--r--include/net/bluetooth/hci.h2
-rw-r--r--include/net/cfg80211.h158
-rw-r--r--include/net/codel.h217
-rw-r--r--include/net/codel_impl.h255
-rw-r--r--include/net/codel_qdisc.h73
-rw-r--r--include/net/devlink.h61
-rw-r--r--include/net/dsa.h48
-rw-r--r--include/net/dst.h5
-rw-r--r--include/net/fou.h10
-rw-r--r--include/net/fq.h95
-rw-r--r--include/net/fq_impl.h277
-rw-r--r--include/net/gen_stats.h6
-rw-r--r--include/net/geneve.h6
-rw-r--r--include/net/gre.h104
-rw-r--r--include/net/gtp.h34
-rw-r--r--include/net/icmp.h4
-rw-r--r--include/net/inet6_hashtables.h12
-rw-r--r--include/net/inet_common.h5
-rw-r--r--include/net/inet_hashtables.h47
-rw-r--r--include/net/ip.h16
-rw-r--r--include/net/ip6_tunnel.h72
-rw-r--r--include/net/ip_tunnels.h97
-rw-r--r--include/net/ip_vs.h17
-rw-r--r--include/net/ipv6.h68
-rw-r--r--include/net/l3mdev.h68
-rw-r--r--include/net/mac80211.h94
-rw-r--r--include/net/mac802154.h10
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_core.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h108
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h1
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h3
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h5
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netlink.h121
-rw-r--r--include/net/netns/conntrack.h10
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/nfc/nci_core.h17
-rw-r--r--include/net/nl802154.h6
-rw-r--r--include/net/pkt_cls.h21
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/protocol.h3
-rw-r--r--include/net/request_sock.h31
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rtnetlink.h7
-rw-r--r--include/net/sch_generic.h20
-rw-r--r--include/net/sctp/sctp.h38
-rw-r--r--include/net/sctp/structs.h13
-rw-r--r--include/net/snmp.h40
-rw-r--r--include/net/sock.h170
-rw-r--r--include/net/switchdev.h6
-rw-r--r--include/net/tc_act/tc_mirred.h15
-rw-r--r--include/net/tcp.h81
-rw-r--r--include/net/transp_v6.h4
-rw-r--r--include/net/udp.h56
-rw-r--r--include/net/udp_tunnel.h19
-rw-r--r--include/net/vxlan.h81
-rw-r--r--include/net/xfrm.h4
-rw-r--r--include/rdma/ib.h16
-rw-r--r--include/rdma/ib_mad.h60
-rw-r--r--include/rdma/ib_pack.h5
-rw-r--r--include/rdma/ib_sa.h12
-rw-r--r--include/rdma/ib_verbs.h187
-rw-r--r--include/rdma/mr_pool.h25
-rw-r--r--include/rdma/rdma_vt.h14
-rw-r--r--include/rdma/rdmavt_qp.h10
-rw-r--r--include/rdma/rw.h88
-rw-r--r--include/rxrpc/packet.h2
-rw-r--r--include/scsi/scsi.h19
-rw-r--r--include/scsi/scsi_common.h1
-rw-r--r--include/scsi/scsi_device.h14
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h9
-rw-r--r--include/soc/at91/atmel-sfr.h18
-rw-r--r--include/soc/nps/common.h166
-rw-r--r--include/soc/tegra/fuse.h1
-rw-r--r--include/soc/tegra/pmc.h36
-rw-r--r--include/sound/dmaengine_pcm.h12
-rw-r--r--include/sound/hda_chmap.h2
-rw-r--r--include/sound/hda_i915.h15
-rw-r--r--include/sound/hdaudio_ext.h13
-rw-r--r--include/sound/hdmi-codec.h100
-rw-r--r--include/sound/pcm_iec958.h2
-rw-r--r--include/sound/soc-dapm.h3
-rw-r--r--include/sound/soc.h5
-rw-r--r--include/target/iscsi/iscsi_target_core.h27
-rw-r--r--include/target/iscsi/iscsi_transport.h41
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/target/target_core_fabric.h10
-rw-r--r--include/trace/events/compaction.h3
-rw-r--r--include/trace/events/ext4.h6
-rw-r--r--include/trace/events/f2fs.h24
-rw-r--r--include/trace/events/kvm.h17
-rw-r--r--include/trace/events/libata.h10
-rw-r--r--include/trace/events/mmc.h182
-rw-r--r--include/trace/events/rcu.h79
-rw-r--r--include/trace/events/scsi.h6
-rw-r--r--include/trace/perf.h16
-rw-r--r--include/trace/trace_events.h3
-rw-r--r--include/uapi/asm-generic/unistd.h7
-rw-r--r--include/uapi/drm/amdgpu_drm.h8
-rw-r--r--include/uapi/drm/armada_drm.h8
-rw-r--r--include/uapi/drm/drm.h34
-rw-r--r--include/uapi/drm/drm_fourcc.h8
-rw-r--r--include/uapi/drm/drm_mode.h20
-rw-r--r--include/uapi/drm/drm_sarea.h8
-rw-r--r--include/uapi/drm/etnaviv_drm.h8
-rw-r--r--include/uapi/drm/exynos_drm.h8
-rw-r--r--include/uapi/drm/i810_drm.h8
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/mga_drm.h8
-rw-r--r--include/uapi/drm/msm_drm.h8
-rw-r--r--include/uapi/drm/nouveau_drm.h10
-rw-r--r--include/uapi/drm/omap_drm.h8
-rw-r--r--include/uapi/drm/qxl_drm.h9
-rw-r--r--include/uapi/drm/r128_drm.h8
-rw-r--r--include/uapi/drm/radeon_drm.h8
-rw-r--r--include/uapi/drm/savage_drm.h8
-rw-r--r--include/uapi/drm/sis_drm.h10
-rw-r--r--include/uapi/drm/tegra_drm.h8
-rw-r--r--include/uapi/drm/vc4_drm.h8
-rw-r--r--include/uapi/drm/via_drm.h8
-rw-r--r--include/uapi/drm/virtgpu_drm.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h9
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/bpf.h7
-rw-r--r--include/uapi/linux/btrfs.h188
-rw-r--r--include/uapi/linux/btrfs_tree.h966
-rw-r--r--include/uapi/linux/coresight-stm.h21
-rw-r--r--include/uapi/linux/devlink.h63
-rw-r--r--include/uapi/linux/elf.h10
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/fib_rules.h1
-rw-r--r--include/uapi/linux/fs.h3
-rw-r--r--include/uapi/linux/gen_stats.h1
-rw-r--r--include/uapi/linux/gtp.h33
-rw-r--r--include/uapi/linux/i2c.h13
-rw-r--r--include/uapi/linux/if.h28
-rw-r--r--include/uapi/linux/if_bridge.h18
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h62
-rw-r--r--include/uapi/linux/if_macsec.h14
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/ila.h8
-rw-r--r--include/uapi/linux/inet_diag.h6
-rw-r--r--include/uapi/linux/ip_vs.h1
-rw-r--r--include/uapi/linux/keyctl.h10
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/l2tp.h2
-rw-r--r--include/uapi/linux/libc-compat.h44
-rw-r--r--include/uapi/linux/lwtunnel.h2
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/ndctl.h80
-rw-r--r--include/uapi/linux/neighbour.h2
-rw-r--r--include/uapi/linux/net_tstamp.h10
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h9
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_acct.h1
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h10
-rw-r--r--include/uapi/linux/nl80211.h110
-rw-r--r--include/uapi/linux/nvme_ioctl.h1
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/pci_regs.h20
-rw-r--r--include/uapi/linux/perf_event.h5
-rw-r--r--include/uapi/linux/pkt_cls.h10
-rw-r--r--include/uapi/linux/pkt_sched.h7
-rw-r--r--include/uapi/linux/qrtr.h12
-rw-r--r--include/uapi/linux/quota.h1
-rw-r--r--include/uapi/linux/rio_mport_cdev.h (renamed from include/linux/rio_mport_cdev.h)144
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/serial_core.h6
-rw-r--r--include/uapi/linux/signal.h5
-rw-r--r--include/uapi/linux/sock_diag.h1
-rw-r--r--include/uapi/linux/swab.h24
-rw-r--r--include/uapi/linux/sync_file.h100
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h1
-rw-r--r--include/uapi/linux/tc_act/tc_connmark.h1
-rw-r--r--include/uapi/linux/tc_act/tc_csum.h1
-rw-r--r--include/uapi/linux/tc_act/tc_defact.h1
-rw-r--r--include/uapi/linux/tc_act/tc_gact.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ipt.h1
-rw-r--r--include/uapi/linux/tc_act/tc_mirred.h1
-rw-r--r--include/uapi/linux/tc_act/tc_nat.h1
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h1
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h1
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h1
-rw-r--r--include/uapi/linux/tcp_metrics.h1
-rw-r--r--include/uapi/linux/tty_flags.h13
-rw-r--r--include/uapi/linux/udp.h3
-rw-r--r--include/uapi/linux/usb/ch9.h121
-rw-r--r--include/uapi/linux/uuid.h4
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h30
-rw-r--r--include/uapi/linux/videodev2.h38
-rw-r--r--include/uapi/linux/vt.h1
-rw-r--r--include/uapi/linux/xfrm.h1
-rw-r--r--include/uapi/mtd/mtd-abi.h2
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h80
-rw-r--r--include/uapi/rdma/ib_user_verbs.h1
-rw-r--r--include/uapi/rdma/rdma_netlink.h10
-rw-r--r--include/uapi/sound/asoc.h44
-rw-r--r--include/uapi/sound/asound.h2
-rw-r--r--include/video/exynos5433_decon.h6
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--include/video/mipi_display.h8
-rw-r--r--include/video/sh_mipi_dsi.h59
-rw-r--r--include/xen/page.h4
634 files changed, 16450 insertions, 4140 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 14362a84c78e..788c6c35291a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -61,12 +61,12 @@ bool acpi_ata_match(acpi_handle handle);
bool acpi_bay_match(acpi_handle handle);
bool acpi_dock_match(acpi_handle handle);
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs);
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
- int rev, int func, union acpi_object *argv4);
+ u64 rev, u64 func, union acpi_object *argv4);
static inline union acpi_object *
-acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
+acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
union acpi_object *argv4, acpi_object_type type)
{
union acpi_object *obj;
@@ -87,7 +87,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
.package.elements = (eles) \
}
-bool acpi_dev_present(const char *hid);
+bool acpi_dev_found(const char *hid);
#ifdef CONFIG_ACPI
@@ -394,13 +394,13 @@ struct acpi_data_node {
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
- return fwnode && (fwnode->type == FWNODE_ACPI
+ return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
|| fwnode->type == FWNODE_ACPI_DATA);
}
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_ACPI;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
}
static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
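Illustrative sketch (not part of the commit): a minimal caller of the widened _DSM helpers above, with rev/func now passed as u64. The UUID bytes, revision and function index are placeholders, not any real device's _DSM.

#include <linux/acpi.h>

/* Placeholder UUID; a real driver takes this from its device specification. */
static const u8 example_dsm_uuid[16] = { 0 };

static bool example_dsm_func1_supported(acpi_handle handle)
{
	u64 rev = 1;		/* _DSM revision, a u64 after this change        */
	u64 funcs = 1 << 1;	/* bit 1: "is function 1 implemented?"           */

	/* acpi_check_dsm() evaluates _DSM function 0 and tests the mask. */
	return acpi_check_dsm(handle, example_dsm_uuid, rev, funcs);
}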
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 29c691265b49..797ae2ec8eee 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -78,7 +78,6 @@
/* ACPI PCI Interrupt Link (pci_link.c) */
-int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d1e34d1eeea6..562603d7aabe 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -96,7 +96,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- char **new_val);
+ acpi_string *new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
@@ -108,7 +108,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address * new_address,
+ acpi_physical_address *new_address,
u32 *new_table_length);
#endif
@@ -203,7 +203,7 @@ void acpi_os_unmap_memory(void *logical_address, acpi_size size);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
acpi_status
acpi_os_get_physical_address(void *logical_address,
- acpi_physical_address * physical_address);
+ acpi_physical_address *physical_address);
#endif
/*
@@ -379,14 +379,14 @@ acpi_status
acpi_os_get_table_by_name(char *signature,
u32 instance,
struct acpi_table_header **table,
- acpi_physical_address * address);
+ acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
acpi_status
acpi_os_get_table_by_index(u32 index,
struct acpi_table_header **table,
- u32 *instance, acpi_physical_address * address);
+ u32 *instance, acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 17556979dc79..4e4c21491c41 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20160108
+#define ACPI_CA_VERSION 0x20160422
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
* Optionally support group module level code.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -484,8 +484,8 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_find_root_pointer(acpi_physical_address *
- rsdp_address))
+ acpi_find_root_pointer(acpi_physical_address
+ *rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table_header(acpi_string signature,
u32 instance,
@@ -530,7 +530,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_handle(acpi_handle parent,
acpi_string pathname,
- acpi_handle * ret_handle))
+ acpi_handle *ret_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_attach_data(acpi_handle object,
acpi_object_handler handler,
@@ -575,15 +575,15 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
acpi_handle child,
- acpi_handle * out_handle))
+ acpi_handle *out_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_type(acpi_handle object,
- acpi_object_type * out_type))
+ acpi_object_type *out_type))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_parent(acpi_handle object,
- acpi_handle * out_handle))
+ acpi_handle *out_handle))
/*
* Handler interfaces
@@ -755,7 +755,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
- acpi_handle * gpe_device))
+ acpi_handle *gpe_device))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
@@ -771,8 +771,8 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
* Resource interfaces
*/
typedef
-acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
- void *context);
+acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource,
+ void *context);
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_vendor_resource(acpi_handle device,
@@ -938,7 +938,8 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void
ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
acpi_log_error(const char *format, ...))
- acpi_status acpi_initialize_debugger(void);
+
+acpi_status acpi_initialize_debugger(void);
void acpi_terminate_debugger(void);
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index cf2acb84dfeb..16c189283ea0 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -417,6 +417,7 @@ struct acpi_resource_gpio {
u8 type; \
u8 producer_consumer; /* For values, see Producer/Consumer above */\
u8 slave_mode; \
+ u8 connection_sharing; \
u8 type_revision_id; \
u16 type_data_length; \
u16 vendor_length; \
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 0cb1a0036986..c19700e2a2fe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -223,7 +223,7 @@ struct acpi_table_facs {
/*******************************************************************************
*
* FADT - Fixed ACPI Description Table (Signature "FACP")
- * Version 4
+ * Version 6
*
******************************************************************************/
@@ -413,4 +413,6 @@ struct acpi_table_desc {
#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
+
#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 16e013600c19..796d6baae3a3 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -236,7 +236,8 @@ enum acpi_einj_actions {
ACPI_EINJ_CHECK_BUSY_STATUS = 6,
ACPI_EINJ_GET_COMMAND_STATUS = 7,
ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
- ACPI_EINJ_ACTION_RESERVED = 9, /* 9 and greater are reserved */
+ ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
+ ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
@@ -348,7 +349,8 @@ enum acpi_erst_actions {
ACPI_ERST_GET_ERROR_RANGE = 13,
ACPI_ERST_GET_ERROR_LENGTH = 14,
ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
- ACPI_ERST_ACTION_RESERVED = 16 /* 16 and greater are reserved */
+ ACPI_ERST_EXECUTE_TIMINGS = 16,
+ ACPI_ERST_ACTION_RESERVED = 17 /* 17 and greater are reserved */
};
/* Values for Instruction field above */
@@ -427,7 +429,8 @@ enum acpi_hest_types {
ACPI_HEST_TYPE_AER_ENDPOINT = 7,
ACPI_HEST_TYPE_AER_BRIDGE = 8,
ACPI_HEST_TYPE_GENERIC_ERROR = 9,
- ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */
+ ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
+ ACPI_HEST_TYPE_RESERVED = 11 /* 11 and greater are reserved */
};
/*
@@ -506,7 +509,11 @@ enum acpi_hest_notify_types {
ACPI_HEST_NOTIFY_NMI = 4,
ACPI_HEST_NOTIFY_CMCI = 5, /* ACPI 5.0 */
ACPI_HEST_NOTIFY_MCE = 6, /* ACPI 5.0 */
- ACPI_HEST_NOTIFY_RESERVED = 7 /* 7 and greater are reserved */
+ ACPI_HEST_NOTIFY_GPIO = 7, /* ACPI 6.0 */
+ ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_RESERVED = 11 /* 11 and greater are reserved */
};
/* Values for config_write_enable bitfield above */
@@ -603,6 +610,24 @@ struct acpi_hest_generic {
u32 error_block_length;
};
+/* 10: Generic Hardware Error Source, version 2 */
+
+struct acpi_hest_generic_v2 {
+ struct acpi_hest_header header;
+ u16 related_source_id;
+ u8 reserved;
+ u8 enabled;
+ u32 records_to_preallocate;
+ u32 max_sections_per_record;
+ u32 max_raw_data_length;
+ struct acpi_generic_address error_status_address;
+ struct acpi_hest_notify notify;
+ u32 error_block_length;
+ struct acpi_generic_address read_ack_register;
+ u64 read_ack_preserve;
+ u64 read_ack_write;
+};
+
/* Generic Error Status block */
struct acpi_hest_generic_status {
@@ -634,6 +659,33 @@ struct acpi_hest_generic_data {
u8 fru_text[20];
};
+/* Extension for revision 0x0300 */
+
+struct acpi_hest_generic_data_v300 {
+ u8 section_type[16];
+ u32 error_severity;
+ u16 revision;
+ u8 validation_bits;
+ u8 flags;
+ u32 error_data_length;
+ u8 fru_id[16];
+ u8 fru_text[20];
+ u64 time_stamp;
+};
+
+/* Values for error_severity above */
+
+#define ACPI_HEST_GEN_ERROR_RECOVERABLE 0
+#define ACPI_HEST_GEN_ERROR_FATAL 1
+#define ACPI_HEST_GEN_ERROR_CORRECTED 2
+#define ACPI_HEST_GEN_ERROR_NONE 3
+
+/* Flags for validation_bits above */
+
+#define ACPI_HEST_GEN_VALID_FRU_ID (1)
+#define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1)
+#define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2)
+
/*******************************************************************************
*
* MADT - Multiple APIC Description Table
@@ -934,7 +986,7 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
- * NFIT - NVDIMM Interface Table (ACPI 6.0)
+ * NFIT - NVDIMM Interface Table (ACPI 6.0+)
* Version 1
*
******************************************************************************/
@@ -1015,6 +1067,7 @@ struct acpi_nfit_memory_map {
#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */
#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */
/* 2: Interleave Structure */
@@ -1046,7 +1099,10 @@ struct acpi_nfit_control_region {
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 subsystem_revision_id;
- u8 reserved[6]; /* Reserved, must be zero */
+ u8 valid_fields;
+ u8 manufacturing_location;
+ u16 manufacturing_date;
+ u8 reserved[2]; /* Reserved, must be zero */
u32 serial_number;
u16 code;
u16 windows;
@@ -1061,7 +1117,11 @@ struct acpi_nfit_control_region {
/* Flags */
-#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* valid_fields bits */
+
+#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */
/* 5: NVDIMM Block Data Window Region Structure */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a4ef62537cac..c93dbadfc71d 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -321,7 +321,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", May 22 2012.
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
*
******************************************************************************/
@@ -371,6 +371,11 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
+#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
+#define ACPI_DBG2_ARM_DCC 0x000F
+#define ACPI_DBG2_BCM2835 0x0010
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -399,7 +404,7 @@ struct acpi_table_dbgp {
* Version 1
*
* Conforms to "Intel Virtualization Technology for Directed I/O",
- * Version 2.2, Sept. 2013
+ * Version 2.3, October 2014
*
******************************************************************************/
@@ -413,6 +418,8 @@ struct acpi_table_dmar {
/* Masks for Flags field above */
#define ACPI_DMAR_INTR_REMAP (1)
+#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1)
+#define ACPI_DMAR_X2APIC_MODE (1<<2)
/* DMAR subtable header */
@@ -655,7 +662,7 @@ struct acpi_ibft_target {
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049A, 2015
+ * Document number: ARM DEN 0049B, October 2015
*
******************************************************************************/
@@ -685,7 +692,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_ITS_GROUP = 0x00,
ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
- ACPI_IORT_NODE_SMMU = 0x03
+ ACPI_IORT_NODE_SMMU = 0x03,
+ ACPI_IORT_NODE_SMMU_V3 = 0x04
};
struct acpi_iort_id_mapping {
@@ -775,6 +783,23 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+struct acpi_iort_smmu_v3 {
+ u64 base_address; /* SMMUv3 base address */
+ u32 flags;
+ u32 reserved;
+ u64 vatos_address;
+ u32 model; /* O: generic SMMUv3 */
+ u32 event_gsiv;
+ u32 pri_gsiv;
+ u32 gerr_gsiv;
+ u32 sync_gsiv;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
+#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1)
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -1102,10 +1127,10 @@ struct acpi_table_slic {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 1
+ * Version 2
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.00, January 11, 2002
+ * Version 1.03, August 10, 2015
*
******************************************************************************/
@@ -1137,6 +1162,8 @@ struct acpi_table_spcr {
#define ACPI_SPCR_DO_NOT_DISABLE (1)
+/* Values for Interface Type: See the definition of the DBG2 table */
+
/*******************************************************************************
*
* SPMI - Server Platform Management Interface table
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index ddf5e66c3b15..ebc1f4f9fe66 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -184,7 +184,7 @@ struct acpi_table_fpdt {
struct acpi_table_header header; /* Common ACPI table header */
};
-/* FPDT subtable header */
+/* FPDT subtable header (Performance Record Structure) */
struct acpi_fpdt_header {
u16 type;
@@ -205,19 +205,15 @@ enum acpi_fpdt_type {
/* 0: Firmware Basic Boot Performance Record */
-struct acpi_fpdt_boot {
+struct acpi_fpdt_boot_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
- u64 reset_end;
- u64 load_start;
- u64 startup_start;
- u64 exit_services_entry;
- u64 exit_services_exit;
+ u64 address;
};
/* 1: S3 Performance Table Pointer Record */
-struct acpi_fpdt_s3pt_ptr {
+struct acpi_fpdt_s3pt_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
u64 address;
@@ -225,7 +221,7 @@ struct acpi_fpdt_s3pt_ptr {
/*
* S3PT - S3 Performance Table. This table is pointed to by the
- * FPDT S3 Pointer Record above.
+ * S3 Pointer Record above.
*/
struct acpi_table_s3pt {
u8 signature[4]; /* "S3PT" */
@@ -233,34 +229,43 @@ struct acpi_table_s3pt {
};
/*
- * S3PT Subtables
+ * S3PT Subtables (Not part of the actual FPDT)
*/
-struct acpi_s3pt_header {
- u16 type;
- u8 length;
- u8 revision;
-};
-/* Values for Type field above */
+/* Values for Type field in S3PT header */
enum acpi_s3pt_type {
ACPI_S3PT_TYPE_RESUME = 0,
- ACPI_S3PT_TYPE_SUSPEND = 1
+ ACPI_S3PT_TYPE_SUSPEND = 1,
+ ACPI_FPDT_BOOT_PERFORMANCE = 2
};
struct acpi_s3pt_resume {
- struct acpi_s3pt_header header;
+ struct acpi_fpdt_header header;
u32 resume_count;
u64 full_resume;
u64 average_resume;
};
struct acpi_s3pt_suspend {
- struct acpi_s3pt_header header;
+ struct acpi_fpdt_header header;
u64 suspend_start;
u64 suspend_end;
};
+/*
+ * FPDT Boot Performance Record (Not part of the actual FPDT)
+ */
+struct acpi_fpdt_boot {
+ struct acpi_fpdt_header header;
+ u8 reserved[4];
+ u64 reset_end;
+ u64 load_start;
+ u64 startup_start;
+ u64 exit_services_entry;
+ u64 exit_services_exit;
+};
+
/*******************************************************************************
*
* GTDT - Generic Timer Description Table (ACPI 5.1)
@@ -476,7 +481,8 @@ struct acpi_table_pcct {
enum acpi_pcct_type {
ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
- ACPI_PCCT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
+ ACPI_PCCT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
};
/*
@@ -515,6 +521,26 @@ struct acpi_pcct_hw_reduced {
u16 min_turnaround_time;
};
+/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
+
+struct acpi_pcct_hw_reduced_type2 {
+ struct acpi_subtable_header header;
+ u32 doorbell_interrupt;
+ u8 flags;
+ u8 reserved;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+ struct acpi_generic_address doorbell_ack_register;
+ u64 ack_preserve_mask;
+ u64 ack_write_mask;
+};
+
/* Values for doorbell flags above */
#define ACPI_PCCT_INTERRUPT_POLARITY (1)
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index db46546d3b9d..cb389efd321c 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -630,7 +630,8 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D
-#define ACPI_NOTIFY_MAX 0x0D
+#define ACPI_GENERIC_NOTIFY_MAX 0x0D
+#define ACPI_SPECIFIC_NOTIFY_MAX 0x84
/*
* Types associated with ACPI names and objects. The first group of
@@ -892,7 +893,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
-typedef acpi_status(*acpi_sleep_function) (u8 sleep_state);
+typedef acpi_status (*acpi_sleep_function) (u8 sleep_state);
struct acpi_sleep_functions {
acpi_sleep_function legacy_function;
@@ -994,7 +995,7 @@ struct acpi_buffer {
* Predefined Namespace items
*/
struct acpi_predefined_names {
- char *name;
+ const char *name;
u8 type;
char *val;
};
@@ -1071,20 +1072,21 @@ void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
typedef
void (*acpi_object_handler) (acpi_handle object, void *data);
-typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
+typedef
+acpi_status (*acpi_init_handler) (acpi_handle object, u32 function);
#define ACPI_INIT_DEVICE_INI 1
typedef
-acpi_status(*acpi_exception_handler) (acpi_status aml_status,
- acpi_name name,
- u16 opcode,
- u32 aml_offset, void *context);
+acpi_status (*acpi_exception_handler) (acpi_status aml_status,
+ acpi_name name,
+ u16 opcode,
+ u32 aml_offset, void *context);
/* Table Event handler (Load, load_table, etc.) and types */
typedef
-acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
+acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
#define ACPI_TABLE_LOAD 0x0
#define ACPI_TABLE_UNLOAD 0x1
@@ -1093,12 +1095,12 @@ acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
/* Address Spaces (For Operation Regions) */
typedef
-acpi_status(*acpi_adr_space_handler) (u32 function,
- acpi_physical_address address,
- u32 bit_width,
- u64 *value,
- void *handler_context,
- void *region_context);
+acpi_status (*acpi_adr_space_handler) (u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ u64 *value,
+ void *handler_context,
+ void *region_context);
#define ACPI_DEFAULT_HANDLER NULL
@@ -1111,18 +1113,18 @@ struct acpi_connection_info {
};
typedef
-acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
- u32 function,
- void *handler_context,
- void **region_context);
+acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
+ u32 function,
+ void *handler_context,
+ void **region_context);
#define ACPI_REGION_ACTIVATE 0
#define ACPI_REGION_DEACTIVATE 1
typedef
-acpi_status(*acpi_walk_callback) (acpi_handle object,
- u32 nesting_level,
- void *context, void **return_value);
+acpi_status (*acpi_walk_callback) (acpi_handle object,
+ u32 nesting_level,
+ void *context, void **return_value);
typedef
u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
@@ -1227,7 +1229,7 @@ struct acpi_mem_space_context {
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
*/
struct acpi_memory_list {
- char *list_name;
+ const char *list_name;
void *list_head;
u16 object_size;
u16 max_depth;
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 7c0595bde132..86b5a8447606 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -66,17 +66,28 @@
*
*****************************************************************************/
+/* Common application configuration. All single threaded except for acpi_exec. */
+
+#if (defined ACPI_ASL_COMPILER) || \
+ (defined ACPI_BIN_APP) || \
+ (defined ACPI_DUMP_APP) || \
+ (defined ACPI_HELP_APP) || \
+ (defined ACPI_NAMES_APP) || \
+ (defined ACPI_SRC_APP) || \
+ (defined ACPI_XTRACT_APP) || \
+ (defined ACPI_EXAMPLE_APP)
+#define ACPI_APPLICATION
+#define ACPI_SINGLE_THREADED
+#endif
+
/* iASL configuration */
#ifdef ACPI_ASL_COMPILER
-#define ACPI_APPLICATION
#define ACPI_DEBUG_OUTPUT
#define ACPI_CONSTANT_EVAL_ONLY
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
-#define ACPI_SINGLE_THREADED
#define ACPI_32BIT_PHYSICAL_ADDRESS
-
#define ACPI_DISASSEMBLER 1
#endif
@@ -89,21 +100,6 @@
#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
-/*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
- * configuration. All single threaded.
- */
-#if (defined ACPI_BIN_APP) || \
- (defined ACPI_DUMP_APP) || \
- (defined ACPI_HELP_APP) || \
- (defined ACPI_NAMES_APP) || \
- (defined ACPI_SRC_APP) || \
- (defined ACPI_XTRACT_APP) || \
- (defined ACPI_EXAMPLE_APP)
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
-#endif
-
/* acpi_help configuration. Error messages disabled. */
#ifdef ACPI_HELP_APP
@@ -138,11 +134,16 @@
#define ACPI_REDUCED_HARDWARE 1
#endif
-/* Linkable ACPICA library */
+/* Linkable ACPICA library. Two versions, one with full debug. */
#ifdef ACPI_LIBRARY
#define ACPI_USE_LOCAL_CACHE
-#define ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
+
+#ifdef _DEBUG
+#define ACPI_DEBUG_OUTPUT
+#endif
#endif
/* Common for all ACPICA applications */
@@ -218,6 +219,9 @@
#elif defined(__HAIKU__)
#include "achaiku.h"
+#elif defined(__QNX__)
+#include "acqnx.h"
+
#else
/* Unknown environment */
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
deleted file mode 100644
index 28084a1034fe..000000000000
--- a/include/acpi/platform/acmsvcex.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/******************************************************************************
- *
- * Name: acmsvcex.h - Extra VC specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACMSVCEX_H__
-#define __ACMSVCEX_H__
-
-/* Debug support. */
-
-#ifdef _DEBUG
-#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
-#include <crtdbg.h>
-#endif
-
-#endif /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
deleted file mode 100644
index a00b3e4b80b0..000000000000
--- a/include/acpi/platform/acwinex.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Name: acwinex.h - Extra OS specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACWINEX_H__
-#define __ACWINEX_H__
-
-/* Windows uses VC */
-
-#endif /* __ACWINEX_H__ */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 5ca2f2c16458..5731ccb42585 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -4,6 +4,19 @@
#include <linux/errno.h> /* for ENODEV */
#include <linux/types.h> /* for bool */
+struct acpi_video_brightness_flags {
+ u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
+ u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
+ u8 _BQC_use_index:1; /* _BQC returns an index value */
+};
+
+struct acpi_video_device_brightness {
+ int curr;
+ int count;
+ int *levels;
+ struct acpi_video_brightness_flags flags;
+};
+
struct acpi_device;
#define ACPI_VIDEO_CLASS "video"
@@ -37,6 +50,9 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
* may change over time and should not be cached.
*/
extern bool acpi_video_handles_brightness_key_presses(void);
+extern int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
@@ -56,6 +72,12 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
{
return false;
}
+static inline int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level)
+{
+ return -ENODEV;
+}
#endif
#endif
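Illustrative sketch (not part of the commit): how a backlight driver might query the _BCL levels through the new acpi_video_get_levels() helper. Error handling is minimal and ownership of the returned structure is not covered here.

#include <linux/acpi.h>
#include <linux/printk.h>
#include <acpi/video.h>

static int example_dump_bcl_levels(struct acpi_device *adev)
{
	struct acpi_video_device_brightness *br;
	int max_level, i, ret;

	ret = acpi_video_get_levels(adev, &br, &max_level);
	if (ret)
		return ret;	/* -ENODEV when ACPI video support is absent */

	pr_info("max brightness level: %d\n", max_level);
	for (i = 0; i < br->count; i++)
		pr_info("  level[%d] = %d\n", i, br->levels[i]);

	return 0;
}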
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index eed3bbe88c8a..002b81f6f2bc 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readl_relaxed readl
#endif
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define writel_relaxed writel
#endif
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif
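Illustrative sketch (assumption, not part of the commit): with readq_relaxed no longer defaulting on platforms that lack readq, a portable driver is assumed to pull in one of the io-64-nonatomic headers (also touched in this diffstat) to get a two-32-bit-access fallback. The register offset below is a placeholder.

#include <linux/io-64-nonatomic-lo-hi.h>

static u64 example_read_counter(void __iomem *base)
{
	/* 0x10 is a made-up register offset, not a real device layout. */
	return readq_relaxed(base + 0x10);
}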
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9401f4819891..d4458b6dbfb4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#define io_remap_pfn_range remap_pfn_range
#endif
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3e6f8c..c1cde3577551 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count;
+ return READ_ONCE(current_thread_info()->preempt_count);
}
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
{
return &current_thread_info()->preempt_count;
}
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 35a52a880b2f..6bd05700d8c9 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -28,7 +28,30 @@
*/
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
- return atomic_read(&lock->val);
+ /*
+ * queued_spin_lock_slowpath() can ACQUIRE the lock before
+ * issuing the unordered store that sets _Q_LOCKED_VAL.
+ *
+ * See both smp_cond_acquire() sites for more detail.
+ *
+ * This however means that in code like:
+ *
+ * spin_lock(A) spin_lock(B)
+ * spin_unlock_wait(B) spin_is_locked(A)
+ * do_something() do_something()
+ *
+ * Both CPUs can end up running do_something() because the store
+ * setting _Q_LOCKED_VAL will pass through the loads in
+ * spin_unlock_wait() and/or spin_is_locked().
+ *
+ * Avoid this by issuing a full memory barrier between the spin_lock()
+ * and the loads in spin_unlock_wait() and spin_is_locked().
+ *
+ * Note that regular mutual exclusion doesn't care about this
+ * delayed store.
+ */
+ smp_mb();
+ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
/**
@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
*/
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
+ /* See queued_spin_is_locked() */
+ smp_mb();
while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
cpu_relax();
}
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d6d5dc98d7da..3fc94a046bf5 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
{
long tmp;
@@ -63,9 +63,16 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_down_write_failed(sem);
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
{
- __down_write_nested(sem, 0);
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+ return -EINTR;
+ return 0;
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
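
A sketch of the caller-side pattern the killable variant enables; down_write_killable() is assumed here to be the public wrapper over __down_write_killable(), and the critical section is only a placeholder:

#include <linux/rwsem.h>

static int example_update_protected_data(struct rw_semaphore *sem)
{
        if (down_write_killable(sem))
                return -EINTR;  /* task received a fatal signal while sleeping */

        /* ... modify the data protected by sem ... */

        up_write(sem);
        return 0;
}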
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index c9ccafa0d99a..e74072d23e69 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -29,4 +29,18 @@
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
#endif
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int mode1_syscalls_32[] = {
+ __NR_seccomp_read_32, __NR_seccomp_write_32,
+ __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+ 0, /* null terminated */
+ };
+ return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
#endif /* _ASM_GENERIC_SECCOMP_H */
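
An illustrative walk over the zero-terminated table returned by get_compat_mode1_syscalls(); the helper only exists under CONFIG_COMPAT, and the wrapper function shown is hypothetical:

#include <linux/types.h>

static bool example_is_compat_mode1_syscall(int nr)
{
        const int *call = get_compat_mode1_syscalls();

        for (; *call; call++)
                if (nr == *call)
                        return true;

        return false;
}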
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af5cf59..a2508a8f9a9c 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 339125bb4d2c..6a67ab94b553 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,7 +245,9 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- *(.data..init_task)
+ VMLINUX_SYMBOL(__start_init_task) = .; \
+ *(.data..init_task) \
+ VMLINUX_SYMBOL(__end_init_task) = .;
/*
* Read only Data
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 25d0914481a2..caedb74c9210 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -49,11 +49,16 @@ enum arch_timer_reg {
#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
+struct arch_timer_kvm_info {
+ struct timecounter timecounter;
+ int virtual_irq;
+};
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
extern u64 (*arch_timer_read_counter)(void);
-extern struct timecounter *arch_timer_get_timecounter(void);
+extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
#else
@@ -67,11 +72,6 @@ static inline u64 arch_timer_read_counter(void)
return 0;
}
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
- return NULL;
-}
-
#endif
#endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 957bb8763219..75174f80a106 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -405,8 +405,7 @@ static inline void aead_request_set_tfm(struct aead_request *req,
* encrypt and decrypt API calls. During the allocation, the provided aead
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)
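
The corrected Return description changes the error check a caller has to make; a short sketch (names illustrative) of the NULL test that replaces IS_ERR()/PTR_ERR(). The same pattern applies to the ahash and skcipher allocators documented below:

#include <linux/errno.h>
#include <crypto/aead.h>

static int example_alloc_request(struct crypto_aead *tfm,
                                 struct aead_request **out)
{
        struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

        if (!req)               /* NULL on allocation failure, not an ERR_PTR() */
                return -ENOMEM;

        *out = req;
        return 0;
}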
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 1969f1416658..26605888a199 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -547,8 +547,7 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
* the allocation, the provided ahash handle
* is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 441aff9b5aa7..583f199400a3 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -12,6 +12,7 @@
#ifndef _CRYPTO_PKCS7_H
#define _CRYPTO_PKCS7_H
+#include <linux/verification.h>
#include <crypto/public_key.h>
struct key;
@@ -26,14 +27,13 @@ extern void pkcs7_free_message(struct pkcs7_message *pkcs7);
extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
const void **_data, size_t *_datalen,
- bool want_wrapper);
+ size_t *_headerlen);
/*
* pkcs7_trust.c
*/
extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
- struct key *trust_keyring,
- bool *_trusted);
+ struct key *trust_keyring);
/*
* pkcs7_verify.c
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index aa730ea7faf8..882ca0e1e7a5 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,20 +15,6 @@
#define _LINUX_PUBLIC_KEY_H
/*
- * The use to which an asymmetric key is being put.
- */
-enum key_being_used_for {
- VERIFYING_MODULE_SIGNATURE,
- VERIFYING_FIRMWARE_SIGNATURE,
- VERIFYING_KEXEC_PE_SIGNATURE,
- VERIFYING_KEY_SIGNATURE,
- VERIFYING_KEY_SELF_SIGNATURE,
- VERIFYING_UNSPECIFIED_SIGNATURE,
- NR__KEY_BEING_USED_FOR
-};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
-/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
* Note that this may include private part of the key as well as the public
@@ -41,12 +27,13 @@ struct public_key {
const char *pkey_algo;
};
-extern void public_key_destroy(void *payload);
+extern void public_key_free(struct public_key *key);
/*
* Public key cryptography signature data
*/
struct public_key_signature {
+ struct asymmetric_key_id *auth_ids[2];
u8 *s; /* Signature */
u32 s_size; /* Number of bytes in signature */
u8 *digest;
@@ -55,17 +42,21 @@ struct public_key_signature {
const char *hash_algo;
};
+extern void public_key_signature_free(struct public_key_signature *sig);
+
extern struct asymmetric_key_subtype public_key_subtype;
+
struct key;
+struct key_type;
+union key_payload;
+
+extern int restrict_link_by_signature(struct key *trust_keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
-struct asymmetric_key_id;
-extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const struct asymmetric_key_id *id,
- const struct asymmetric_key_id *skid,
- bool partial);
-
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 905490c1da89..0f987f50bb52 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -425,8 +425,7 @@ static inline struct skcipher_request *skcipher_request_cast(
* encrypt and decrypt API calls. During the allocation, the provided skcipher
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct skcipher_request *skcipher_request_alloc(
struct crypto_skcipher *tfm, gfp_t gfp)
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
new file mode 100644
index 000000000000..25afb31f0389
--- /dev/null
+++ b/include/drm/bridge/analogix_dp.h
@@ -0,0 +1,41 @@
+/*
+ * Analogix DP (Display Port) Core interface driver.
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ANALOGIX_DP_H_
+#define _ANALOGIX_DP_H_
+
+#include <drm/drm_crtc.h>
+
+enum analogix_dp_devtype {
+ EXYNOS_DP,
+ RK3288_DP,
+};
+
+struct analogix_dp_plat_data {
+ enum analogix_dp_devtype dev_type;
+ struct drm_panel *panel;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ int (*power_on)(struct analogix_dp_plat_data *);
+ int (*power_off)(struct analogix_dp_plat_data *);
+ int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
+ struct drm_connector *);
+ int (*get_modes)(struct analogix_dp_plat_data *);
+};
+
+int analogix_dp_resume(struct device *dev);
+int analogix_dp_suspend(struct device *dev);
+
+int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+ struct analogix_dp_plat_data *plat_data);
+void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
+
+#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3c8422c69572..84f1a8eefbdb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -90,7 +90,7 @@ struct reservation_object;
struct dma_buf_attachment;
/*
- * 4 debug categories are defined:
+ * The following categories are defined:
*
* CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
* This is the category used by the DRM_DEBUG() macro.
@@ -580,12 +580,21 @@ struct drm_driver {
void (*debugfs_cleanup)(struct drm_minor *minor);
/**
- * Driver-specific constructor for drm_gem_objects, to set up
- * obj->driver_private.
+	 * @gem_free_object: destructor for drm_gem_objects
*
- * Returns 0 on success.
+ * This is deprecated and should not be used by new drivers. Use
+ * @gem_free_object_unlocked instead.
*/
void (*gem_free_object) (struct drm_gem_object *obj);
+
+ /**
+	 * @gem_free_object_unlocked: destructor for drm_gem_objects
+ *
+ * This is for drivers which are not encumbered with dev->struct_mutex
+ * legacy locking schemes. Use this hook instead of @gem_free_object.
+ */
+ void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
+
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -769,6 +778,7 @@ struct drm_device {
atomic_t buf_alloc; /**< Buffer allocation in progress */
/*@} */
+ struct mutex filelist_mutex;
struct list_head filelist;
/** \name Memory management */
@@ -805,14 +815,6 @@ struct drm_device {
int irq;
/*
- * At load time, disabling the vblank interrupt won't be allowed since
- * old clients may not call the modeset ioctl and therefore misbehave.
- * Once the modeset ioctl *has* been called though, we can safely
- * disable them when unused.
- */
- bool vblank_disable_allowed;
-
- /*
* If true, vblank interrupt will be disabled immediately when the
* refcount drops to zero, as opposed to via the vblank disable
* timer.
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
index 193ef19dfc5c..b2d912670a7f 100644
--- a/include/drm/drm_agpsupport.h
+++ b/include/drm/drm_agpsupport.h
@@ -37,7 +37,7 @@ struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
uint32_t type);
struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_agp_clear(struct drm_device *dev);
+void drm_legacy_agp_clear(struct drm_device *dev);
int drm_agp_acquire(struct drm_device *dev);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -93,7 +93,7 @@ static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return NULL;
}
-static inline void drm_agp_clear(struct drm_device *dev)
+static inline void drm_legacy_agp_clear(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d3eaa5df187a..92c84e9ab09a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -137,7 +137,7 @@ drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
-int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
#define for_each_connector_in_state(state, connector, connector_state, __i) \
for ((__i) = 0; \
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9054598c9a7a..d473dcc91f54 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -40,8 +40,10 @@ int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async);
+ bool nonblock);
+void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+ struct drm_atomic_state *state);
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc);
@@ -108,6 +110,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
uint32_t flags);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
+struct drm_encoder *
+drm_atomic_helper_best_encoder(struct drm_connector *connector);
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -115,8 +119,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -125,8 +128,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -142,8 +144,7 @@ struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state);
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index e0170bf80bb0..d1559cd04e3d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -45,20 +45,12 @@ struct drm_clip_rect;
struct device_node;
struct fence;
-#define DRM_MODE_OBJECT_CRTC 0xcccccccc
-#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
-#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
-#define DRM_MODE_OBJECT_MODE 0xdededede
-#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
-#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
-#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
-#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
-#define DRM_MODE_OBJECT_ANY 0
-
struct drm_mode_object {
uint32_t id;
uint32_t type;
struct drm_object_properties *properties;
+ struct kref refcount;
+ void (*free_cb)(struct kref *kref);
};
#define DRM_OBJECT_MAX_PROPERTY 24
@@ -126,6 +118,14 @@ enum subpixel_order {
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+
+#define DRM_BUS_FLAG_DE_LOW (1<<0)
+#define DRM_BUS_FLAG_DE_HIGH (1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
+
/*
* Describes a given display (e.g. CRT or flat panel) and its limitations.
*/
@@ -147,6 +147,7 @@ struct drm_display_info {
const u32 *bus_formats;
unsigned int num_bus_formats;
+ u32 bus_flags;
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
@@ -233,8 +234,8 @@ struct drm_framebuffer {
* should be deferred. In cases like this, the driver would like to
* hold a ref to the fb even though it has already been removed from
* userspace perspective.
+ * The refcount is stored inside the mode object.
*/
- struct kref refcount;
/*
* Place on the dev->mode_config.fb_list, access protected by
* dev->mode_config.fb_lock.
@@ -258,7 +259,6 @@ struct drm_framebuffer {
struct drm_property_blob {
struct drm_mode_object base;
struct drm_device *dev;
- struct kref refcount;
struct list_head head_global;
struct list_head head_file;
size_t length;
@@ -1895,7 +1895,7 @@ struct drm_mode_config_funcs {
* drm_atomic_helper_commit(), or one of the exported sub-functions of
* it.
*
- * Asynchronous commits (as indicated with the async parameter) must
+ * Nonblocking commits (as indicated with the nonblock parameter) must
* do any preparatory work which might result in an unsuccessful commit
* in the context of this callback. The only exceptions are hardware
* errors resulting in -EIO. But even in that case the driver must
@@ -1908,7 +1908,7 @@ struct drm_mode_config_funcs {
* The driver must wait for any pending rendering to the new
* framebuffers to complete before executing the flip. It should also
* wait for any pending rendering from other drivers if the underlying
- * buffer is a shared dma-buf. Asynchronous commits must not wait for
+ * buffer is a shared dma-buf. Nonblocking commits must not wait for
* rendering in the context of this callback.
*
* An application can request to be notified when the atomic commit has
@@ -1939,7 +1939,7 @@ struct drm_mode_config_funcs {
*
* 0 on success or one of the below negative error codes:
*
- * - -EBUSY, if an asynchronous updated is requested and there is
+	 *  - -EBUSY, if a nonblocking update is requested and there is
	 *    an earlier update pending. Drivers are allowed to support a queue
* of outstanding updates, but currently no driver supports that.
* Note that drivers must wait for preceding updates to complete if a
@@ -1969,7 +1969,7 @@ struct drm_mode_config_funcs {
*/
int (*atomic_commit)(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async);
+ bool nonblock);
/**
* @atomic_state_alloc:
@@ -2259,8 +2259,9 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
return connector->connector_id;
}
-/* helper to unplug all connectors from sysfs for device */
-extern void drm_connector_unplug_all(struct drm_device *dev);
+/* helpers to {un}register all connectors from sysfs for device */
+extern int drm_connector_register_all(struct drm_device *dev);
+extern void drm_connector_unregister_all(struct drm_device *dev);
extern int drm_bridge_add(struct drm_bridge *bridge);
extern void drm_bridge_remove(struct drm_bridge *bridge);
@@ -2386,8 +2387,6 @@ extern int drm_framebuffer_init(struct drm_device *dev,
const struct drm_framebuffer_funcs *funcs);
extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id);
-extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
-extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
@@ -2445,6 +2444,8 @@ extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
@@ -2510,6 +2511,8 @@ extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
extern bool drm_edid_is_valid(struct edid *edid);
+extern void drm_edid_get_monitor_name(struct edid *edid, char *name,
+ int buflen);
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
char topology[8]);
@@ -2577,7 +2580,15 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
return mo ? obj_to_encoder(mo) : NULL;
}
-static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *mo;
@@ -2600,14 +2611,73 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
static inline uint32_t drm_color_lut_extract(uint32_t user_input,
uint32_t bit_precision)
{
- uint32_t val = user_input + (1 << (16 - bit_precision - 1));
+ uint32_t val = user_input;
uint32_t max = 0xffff >> (16 - bit_precision);
- val >>= 16 - bit_precision;
+ /* Round only if we're not using full precision. */
+ if (bit_precision < 16) {
+ val += 1UL << (16 - bit_precision - 1);
+ val >>= 16 - bit_precision;
+ }
return clamp_val(val, 0, max);
}
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ drm_mode_object_unreference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This function returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+ return atomic_read(&fb->base.refcount.refcount);
+}
+
+/**
+ * drm_connector_reference - incr the connector refcnt
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+ drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+ drm_mode_object_unreference(&connector->base);
+}
+
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
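
A worked example of the corrected rounding in drm_color_lut_extract(): for reduced precision the value is rounded before the shift, while a full 16-bit request now passes through unchanged instead of shifting by a negative count. The results are computed directly from the function as shown above:

static void example_lut_values(void)
{
        /* 10-bit precision: (0x8000 + 0x20) >> 6 == 0x200, clamped to 0x3ff */
        u32 ten_bit = drm_color_lut_extract(0x8000, 10);
        /* full 16-bit precision: value passes through, clamped to 0xffff */
        u32 full_range = drm_color_lut_extract(0xffff, 16);

        (void)ten_bit;
        (void)full_range;
}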
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 623b4e98e748..c0d4df6a606f 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -73,4 +73,21 @@ struct displayid_tiled_block {
u8 topology_id[8];
} __packed;
+struct displayid_detailed_timings_1 {
+ u8 pixel_clock[3];
+ u8 flags;
+ u8 hactive[2];
+ u8 hblank[2];
+ u8 hsync[2];
+ u8 hsw[2];
+ u8 vactive[2];
+ u8 vblank[2];
+ u8 vsync[2];
+ u8 vsw[2];
+} __packed;
+
+struct displayid_detailed_timing_block {
+ struct displayid_block base;
+ struct displayid_detailed_timings_1 timings[0];
+};
#endif
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
new file mode 100644
index 000000000000..e8a9dfd0e055
--- /dev/null
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_DP_DUAL_MODE_HELPER_H
+#define DRM_DP_DUAL_MODE_HELPER_H
+
+#include <linux/types.h>
+
+/*
+ * Optional for type 1 DVI adaptors
+ * Mandatory for type 1 HDMI and type 2 adaptors
+ */
+#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
+#define DP_DUAL_MODE_HDMI_ID_LEN 16
+/*
+ * Optional for type 1 adaptors
+ * Mandatory for type 2 adaptors
+ */
+#define DP_DUAL_MODE_ADAPTOR_ID 0x10
+#define DP_DUAL_MODE_REV_MASK 0x07
+#define DP_DUAL_MODE_REV_TYPE2 0x00
+#define DP_DUAL_MODE_TYPE_MASK 0xf0
+#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
+#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
+#define DP_DUAL_IEEE_OUI_LEN 3
+#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
+#define DP_DUAL_DEVICE_ID_LEN 6
+#define DP_DUAL_MODE_HARDWARE_REV 0x1a
+#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
+#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
+#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
+#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
+#define DP_DUAL_MODE_TMDS_OEN 0x20
+#define DP_DUAL_MODE_TMDS_DISABLE 0x01
+#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
+#define DP_DUAL_MODE_CEC_ENABLE 0x01
+#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+
+struct i2c_adapter;
+
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+ u8 offset, void *buffer, size_t size);
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+ u8 offset, const void *buffer, size_t size);
+
+/**
+ * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ */
+enum drm_dp_dual_mode_type {
+ DRM_DP_DUAL_MODE_NONE,
+ DRM_DP_DUAL_MODE_UNKNOWN,
+ DRM_DP_DUAL_MODE_TYPE1_DVI,
+ DRM_DP_DUAL_MODE_TYPE1_HDMI,
+ DRM_DP_DUAL_MODE_TYPE2_DVI,
+ DRM_DP_DUAL_MODE_TYPE2_HDMI,
+};
+
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter);
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool *enabled);
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool enable);
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+
+#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1252108da0ef..9d03f167007b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -73,6 +73,7 @@
# define DP_ENHANCED_FRAME_CAP (1 << 7)
#define DP_MAX_DOWNSPREAD 0x003
+# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
#define DP_NORP 0x004
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index dec6221e8198..919933d1beb4 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -328,7 +328,15 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
int drm_load_edid_firmware(struct drm_connector *connector);
+#else
+static inline int drm_load_edid_firmware(struct drm_connector *connector)
+{
+ return 0;
+}
+#endif
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index be62bd321e75..fd0dde9f0a6d 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -4,11 +4,18 @@
struct drm_fbdev_cma;
struct drm_gem_cma_object;
+struct drm_fb_helper_surface_size;
+struct drm_framebuffer_funcs;
+struct drm_fb_helper_funcs;
struct drm_framebuffer;
+struct drm_fb_helper;
struct drm_device;
struct drm_file;
struct drm_mode_fb_cmd2;
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count);
@@ -16,7 +23,17 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes,
+ const struct drm_framebuffer_funcs *funcs);
+
+void drm_fb_cma_destroy(struct drm_framebuffer *fb);
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int *handle);
+struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -24,6 +41,8 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
#ifdef CONFIG_DEBUG_FS
+struct seq_file;
+
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 062723bdcabe..5b4aa35026a3 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -172,6 +172,10 @@ struct drm_fb_helper_connector {
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
+ * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to
+ * the screen buffer
+ * @dirty_lock: spinlock protecting @dirty_clip
+ * @dirty_work: worker used to flush the framebuffer
*
* This is the main structure used by the fbdev helpers. Drivers supporting
 * fbdev emulation should embed this into their overall driver structure.
@@ -189,6 +193,9 @@ struct drm_fb_helper {
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
+ struct drm_clip_rect dirty_clip;
+ spinlock_t dirty_lock;
+ struct work_struct dirty_work;
/**
* @kernel_fb_list:
@@ -245,6 +252,9 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_deferred_io(struct fb_info *info,
+ struct list_head *pagelist);
+
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
@@ -368,6 +378,11 @@ static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
{
}
+static inline void drm_fb_helper_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+}
+
static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
char __user *buf, size_t count,
loff_t *ppos)
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 0b3e11ab8757..fca1cd1b9c26 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -200,47 +200,29 @@ drm_gem_object_reference(struct drm_gem_object *obj)
}
/**
- * drm_gem_object_unreference - release a GEM BO reference
+ * __drm_gem_object_unreference - raw function to release a GEM BO reference
* @obj: GEM buffer object
*
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This function is meant to be used by drivers which are not encumbered with
+ * dev->struct_mutex legacy locking and which are using the
+ * gem_free_object_unlocked callback. It avoids all the locking checks and
+ * locking overhead of drm_gem_object_unreference() and
+ * drm_gem_object_unreference_unlocked().
*
- * For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * Drivers should never call this directly in their code. Instead they should
+ * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
+ * *obj) wrapper function, and use that. Shared code should never call this, to
+ * avoid breaking drivers by accident which still depend upon dev->struct_mutex
+ * locking.
*/
static inline void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+__drm_gem_object_unreference(struct drm_gem_object *obj)
{
- if (obj != NULL) {
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
- kref_put(&obj->refcount, drm_gem_object_free);
- }
+ kref_put(&obj->refcount, drm_gem_object_free);
}
-/**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- struct drm_device *dev;
-
- if (!obj)
- return;
-
- dev = obj->dev;
- if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
- else
- might_lock(&dev->struct_mutex);
-}
+void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_unreference(struct drm_gem_object *obj);
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
@@ -256,9 +238,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
-struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
- struct drm_file *filp,
- u32 handle);
+struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
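
A sketch of a lookup against the new two-argument drm_gem_object_lookup() signature, where the drm_device argument has been dropped; the caller name is illustrative, and the reference is released with the unlocked helper declared above:

#include <linux/errno.h>
#include <drm/drm_gem.h>

static int example_use_handle(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *obj = drm_gem_object_lookup(file, handle);

        if (!obj)
                return -ENOENT; /* no such handle for this file */

        /* ... use obj ... */

        drm_gem_object_unreference_unlocked(obj);
        return 0;
}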
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 3e698038dc7b..a5ef2c7e40f8 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -154,8 +154,10 @@ struct drm_map_list {
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_p);
-int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index e42495ad8136..70d4e221a3ad 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
+static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
+{
+ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+ return kmalloc(nmemb * size, gfp);
+
+ if (gfp & __GFP_RECLAIMABLE) {
+ void *ptr = kmalloc(nmemb * size,
+ gfp | __GFP_NOWARN | __GFP_NORETRY);
+ if (ptr)
+ return ptr;
+ }
+
+ return __vmalloc(size * nmemb,
+ gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
static __inline void drm_free_large(void *ptr)
{
kvfree(ptr);
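
An illustrative use of the new drm_malloc_gfp() helper for a runtime-sized page array; it falls back to __vmalloc() transparently for large counts and pairs with drm_free_large():

#include <drm/drm_mem_util.h>

static struct page **example_alloc_page_array(size_t npages)
{
        struct page **pages;

        pages = drm_malloc_gfp(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* ... populate, and later release with drm_free_large(pages) ... */
        return pages;
}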
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index b61c2d45192e..d4619dc2eecb 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -672,7 +672,7 @@ struct drm_connector_helper_funcs {
* fixed panel can also manually add specific modes using
* drm_mode_probed_add(). Drivers which manually add modes should also
* make sure that the @display_info, @width_mm and @height_mm fields of the
- * struct #drm_connector are filled in.
+ * struct &drm_connector are filled in.
*
* Virtual drivers that just want some standard VESA mode with a given
* resolution can call drm_add_modes_noedid(), and mark the preferred
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 13ff44b28893..220d1e2b3db1 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -75,6 +75,14 @@ struct drm_panel_funcs {
struct display_timing *timings);
};
+/**
+ * struct drm_panel - DRM panel object
+ * @drm: DRM device owning the panel
+ * @connector: DRM connector that the panel is attached to
+ * @dev: parent device of the panel
+ * @funcs: operations that can be performed on the panel
+ * @list: panel entry in registry
+ */
struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
@@ -85,6 +93,17 @@ struct drm_panel {
struct list_head list;
};
+/**
+ * drm_panel_unprepare - power off a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will completely power off a panel (assert the panel's
+ * reset, turn off power supplies, ...). After this function has completed, it
+ * is usually no longer possible to communicate with the panel until another
+ * call to drm_panel_prepare().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
static inline int drm_panel_unprepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->unprepare)
@@ -93,6 +112,16 @@ static inline int drm_panel_unprepare(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+/**
+ * drm_panel_disable - disable a panel
+ * @panel: DRM panel
+ *
+ * This will typically turn off the panel's backlight or disable the display
+ * drivers. For smart panels it should still be possible to communicate with
+ * the integrated circuitry via any command bus after this call.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
static inline int drm_panel_disable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->disable)
@@ -101,6 +130,16 @@ static inline int drm_panel_disable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+/**
+ * drm_panel_prepare - power on a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will enable power and deassert any reset signals to
+ * the panel. After this has completed it is possible to communicate with any
+ * integrated circuitry via a command bus.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
static inline int drm_panel_prepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->prepare)
@@ -109,6 +148,16 @@ static inline int drm_panel_prepare(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+/**
+ * drm_panel_enable - enable a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will cause the panel display drivers to be turned on
+ * and the backlight to be enabled. Content will be visible on screen after
+ * this call completes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
static inline int drm_panel_enable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->enable)
@@ -117,6 +166,16 @@ static inline int drm_panel_enable(struct drm_panel *panel)
return panel ? -ENOSYS : -EINVAL;
}
+/**
+ * drm_panel_get_modes - probe the available display modes of a panel
+ * @panel: DRM panel
+ *
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+ * Return: The number of modes available from the panel on success or a
+ * negative error code on failure.
+ */
static inline int drm_panel_get_modes(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->get_modes)
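
A sketch of the ordering the comments above imply, not lifted from any particular driver: prepare before enable on the way up, disable before unprepare on the way down:

#include <drm/drm_panel.h>

static int example_panel_power_on(struct drm_panel *panel)
{
        int ret = drm_panel_prepare(panel);     /* power rails, deassert reset */

        if (ret < 0)
                return ret;

        return drm_panel_enable(panel);         /* backlight / display on */
}

static void example_panel_power_off(struct drm_panel *panel)
{
        drm_panel_disable(panel);
        drm_panel_unprepare(panel);
}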
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 2f63dd5e05eb..06ea8e077ec2 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -176,19 +176,6 @@ static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
}
/**
- * drm_vma_node_has_offset() - Check whether node is added to offset manager
- * @node: Node to be checked
- *
- * RETURNS:
- * true iff the node was previously allocated an offset and added to
- * an vma offset manager.
- */
-static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
-{
- return drm_mm_node_allocated(&node->vm_node);
-}
-
-/**
* drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
* @node: Linked offset node
*
@@ -220,7 +207,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
struct address_space *file_mapping)
{
- if (drm_vma_node_has_offset(node))
+ if (drm_mm_node_allocated(&node->vm_node))
unmap_mapping_range(file_mapping,
drm_vma_node_offset_addr(node),
drm_vma_node_size(node) << PAGE_SHIFT, 1);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 055a08ddac02..c801d9028e37 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -314,7 +314,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTARTSYS if interrupted by a signal.
*/
-extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+extern int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait);
/**
* ttm_bo_validate
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3d4bf08aa21f..513f7f96b80a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -434,6 +434,18 @@ struct ttm_bo_driver {
*/
int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+
+ /**
+ * Optional driver callback for when BO is removed from the LRU.
+ * Called with LRU lock held immediately before the removal.
+ */
+ void (*lru_removal)(struct ttm_buffer_object *bo);
+
+ /**
+ * Return the list_head after which a BO should be inserted in the LRU.
+ */
+ struct list_head *(*lru_tail)(struct ttm_buffer_object *bo);
+ struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo);
};
/**
@@ -502,7 +514,6 @@ struct ttm_bo_global {
* @vma_manager: Address space manager
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
- * @val_seq: Current validation sequence.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
@@ -528,7 +539,6 @@ struct ttm_bo_device {
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
- uint32_t val_seq;
/*
* Protected by load / firstopen / lastclose /unload sync.
@@ -753,14 +763,16 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo);
+struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo);
+
/**
* __ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
+ * @ticket: ticket used to acquire the ww_mutex.
*
* Will not remove reserved buffers from the lru lists.
* Otherwise identical to ttm_bo_reserve.
@@ -776,8 +788,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
* be returned if @use_ticket is set to true.
*/
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_ticket,
+ bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
int ret = 0;
@@ -806,8 +817,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
+ * @ticket: ticket used to acquire the ww_mutex.
*
* Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
@@ -846,15 +856,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* be returned if @use_ticket is set to true.
*/
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_ticket,
+ bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
int ret;
WARN_ON(!atomic_read(&bo->kref.refcount));
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
+ ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
@@ -1030,8 +1039,7 @@ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-#define TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
/**
diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h
new file mode 100644
index 000000000000..27359ad83904
--- /dev/null
+++ b/include/dt-bindings/clock/ath79-clk.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2014, 2016 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_ATH79_CLK_H
+#define __DT_BINDINGS_ATH79_CLK_H
+
+#define ATH79_CLK_CPU 0
+#define ATH79_CLK_DDR 1
+#define ATH79_CLK_AHB 2
+
+#define ATH79_CLK_END 3
+
+#endif /* __DT_BINDINGS_ATH79_CLK_H */
diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
new file mode 100644
index 000000000000..f9f04dccc996
--- /dev/null
+++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
@@ -0,0 +1,38 @@
+/*
+ * ARTPEC-6 clock controller indexes
+ *
+ * Copyright 2016 Axis Comunications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
+#define DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
+
+#define ARTPEC6_CLK_CPU 0
+#define ARTPEC6_CLK_CPU_PERIPH 1
+#define ARTPEC6_CLK_NAND_CLKA 2
+#define ARTPEC6_CLK_NAND_CLKB 3
+#define ARTPEC6_CLK_ETH_ACLK 4
+#define ARTPEC6_CLK_DMA_ACLK 5
+#define ARTPEC6_CLK_PTP_REF 6
+#define ARTPEC6_CLK_SD_PCLK 7
+#define ARTPEC6_CLK_SD_IMCLK 8
+#define ARTPEC6_CLK_I2S_HST 9
+#define ARTPEC6_CLK_I2S0_CLK 10
+#define ARTPEC6_CLK_I2S1_CLK 11
+#define ARTPEC6_CLK_UART_PCLK 12
+#define ARTPEC6_CLK_UART_REFCLK 13
+#define ARTPEC6_CLK_I2C 14
+#define ARTPEC6_CLK_SPI_PCLK 15
+#define ARTPEC6_CLK_SPI_SSPCLK 16
+#define ARTPEC6_CLK_SYS_TIMER 17
+#define ARTPEC6_CLK_FRACDIV_IN 18
+#define ARTPEC6_CLK_DBG_PCLK 19
+
+/* This must be the highest clock index plus one. */
+#define ARTPEC6_CLK_NUMCLOCKS 20
+
+#endif
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
index 61f1d20c2a67..360e00cefd35 100644
--- a/include/dt-bindings/clock/bcm2835.h
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -44,5 +44,23 @@
#define BCM2835_CLOCK_EMMC 28
#define BCM2835_CLOCK_PERI_IMAGE 29
#define BCM2835_CLOCK_PWM 30
+#define BCM2835_CLOCK_PCM 31
-#define BCM2835_CLOCK_COUNT 31
+#define BCM2835_PLLA_DSI0 32
+#define BCM2835_PLLA_CCP2 33
+#define BCM2835_PLLD_DSI0 34
+#define BCM2835_PLLD_DSI1 35
+
+#define BCM2835_CLOCK_AVEO 36
+#define BCM2835_CLOCK_DFT 37
+#define BCM2835_CLOCK_GP0 38
+#define BCM2835_CLOCK_GP1 39
+#define BCM2835_CLOCK_GP2 40
+#define BCM2835_CLOCK_SLIM 41
+#define BCM2835_CLOCK_SMI 42
+#define BCM2835_CLOCK_TEC 43
+#define BCM2835_CLOCK_DPI 44
+#define BCM2835_CLOCK_CAM0 45
+#define BCM2835_CLOCK_CAM1 46
+#define BCM2835_CLOCK_DSI0E 47
+#define BCM2835_CLOCK_DSI1E 48
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index 63d01c15d2b3..c796ff02ceeb 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -79,6 +79,8 @@
#define CLK_MOUT_CORE 58
#define CLK_MOUT_APLL 59
#define CLK_MOUT_ACLK_266_SUB 60
+#define CLK_MOUT_UART2 61
+#define CLK_MOUT_MMC2 62
/* Dividers */
#define CLK_DIV_GPL 64
@@ -127,6 +129,9 @@
#define CLK_DIV_CORE 107
#define CLK_DIV_HPM 108
#define CLK_DIV_COPY 109
+#define CLK_DIV_UART2 110
+#define CLK_DIV_MMC2_PRE 111
+#define CLK_DIV_MMC2 112
/* Gates */
#define CLK_ASYNC_G3D 128
@@ -223,6 +228,8 @@
#define CLK_BLOCK_MFC 219
#define CLK_BLOCK_CAM 220
#define CLK_SMIES 221
+#define CLK_UART2 222
+#define CLK_SDMMC2 223
/* Special clocks */
#define CLK_SCLK_JPEG 224
@@ -249,12 +256,14 @@
#define CLK_SCLK_SPI0 245
#define CLK_SCLK_UART1 246
#define CLK_SCLK_UART0 247
+#define CLK_SCLK_UART2 248
+#define CLK_SCLK_MMC2 249
/*
* Total number of clocks of main CMU.
* NOTE: Must be equal to last clock ID increased by one.
*/
-#define CLK_NR_CLKS 248
+#define CLK_NR_CLKS 250
/*
* CMU DMC
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 7699ee9c16c0..17ab8394bec7 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -217,8 +217,30 @@
/* divider clocks */
#define CLK_DOUT_PIXEL 768
+#define CLK_DOUT_ACLK400_WCORE 769
+#define CLK_DOUT_ACLK400_ISP 770
+#define CLK_DOUT_ACLK400_MSCL 771
+#define CLK_DOUT_ACLK200 772
+#define CLK_DOUT_ACLK200_FSYS2 773
+#define CLK_DOUT_ACLK100_NOC 774
+#define CLK_DOUT_PCLK200_FSYS 775
+#define CLK_DOUT_ACLK200_FSYS 776
+#define CLK_DOUT_ACLK333_432_GSCL 777
+#define CLK_DOUT_ACLK333_432_ISP 778
+#define CLK_DOUT_ACLK66 779
+#define CLK_DOUT_ACLK333_432_ISP0 780
+#define CLK_DOUT_ACLK266 781
+#define CLK_DOUT_ACLK166 782
+#define CLK_DOUT_ACLK333 783
+#define CLK_DOUT_ACLK333_G2D 784
+#define CLK_DOUT_ACLK266_G2D 785
+#define CLK_DOUT_ACLK_G3D 786
+#define CLK_DOUT_ACLK300_JPEG 787
+#define CLK_DOUT_ACLK300_DISP1 788
+#define CLK_DOUT_ACLK300_GSCL 789
+#define CLK_DOUT_ACLK400_DISP1 790
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 769
+#define CLK_NR_CLKS 791
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/hi3519-clock.h b/include/dt-bindings/clock/hi3519-clock.h
new file mode 100644
index 000000000000..14f4d2184e5a
--- /dev/null
+++ b/include/dt-bindings/clock/hi3519-clock.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DTS_HI3519_CLOCK_H
+#define __DTS_HI3519_CLOCK_H
+
+#define HI3519_FMC_CLK 1
+#define HI3519_SPI0_CLK 2
+#define HI3519_SPI1_CLK 3
+#define HI3519_SPI2_CLK 4
+#define HI3519_UART0_CLK 5
+#define HI3519_UART1_CLK 6
+#define HI3519_UART2_CLK 7
+#define HI3519_UART3_CLK 8
+#define HI3519_UART4_CLK 9
+#define HI3519_PWM_CLK 10
+#define HI3519_DMA_CLK 11
+#define HI3519_IR_CLK 12
+#define HI3519_ETH_PHY_CLK 13
+#define HI3519_ETH_MAC_CLK 14
+#define HI3519_ETH_MACIF_CLK 15
+#define HI3519_USB2_BUS_CLK 16
+#define HI3519_USB2_PORT_CLK 17
+#define HI3519_USB3_CLK 18
+
+#endif /* __DTS_HI3519_CLOCK_H */
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index edca8985c50e..1183347c383f 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -448,5 +448,6 @@
#define IMX7D_PLL_DRAM_TEST_DIV 435
#define IMX7D_ADC_ROOT_CLK 436
#define IMX7D_CLK_ARM 437
-#define IMX7D_CLK_END 438
+#define IMX7D_CKIL 438
+#define IMX7D_CLK_END 439
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h
new file mode 100644
index 000000000000..184647a6a8de
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,pic32-clock.h
@@ -0,0 +1,42 @@
+/*
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+
+/* clock output indices */
+#define POSCCLK 0
+#define FRCCLK 1
+#define BFRCCLK 2
+#define LPRCCLK 3
+#define SOSCCLK 4
+#define FRCDIVCLK 5
+#define PLLCLK 6
+#define SCLK 7
+#define PB1CLK 8
+#define PB2CLK 9
+#define PB3CLK 10
+#define PB4CLK 11
+#define PB5CLK 12
+#define PB6CLK 13
+#define PB7CLK 14
+#define REF1CLK 15
+#define REF2CLK 16
+#define REF3CLK 17
+#define REF4CLK 18
+#define REF5CLK 19
+#define UPLLCLK 20
+#define MAXCLKS 21
+
+#endif /* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 7956ba1bc974..6094bf7e50ab 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -176,7 +176,8 @@
#define CLK_APMIXED_LVDSPLL 13
#define CLK_APMIXED_MSDCPLL2 14
#define CLK_APMIXED_REF2USB_TX 15
-#define CLK_APMIXED_NR_CLK 16
+#define CLK_APMIXED_HDMI_REF 16
+#define CLK_APMIXED_NR_CLK 17
/* INFRA_SYS */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 7b1ad8922eec..fa5e8da809f2 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -66,6 +66,7 @@
#define R8A7790_CLK_IIC2 0
#define R8A7790_CLK_TPU0 4
#define R8A7790_CLK_MMCIF1 5
+#define R8A7790_CLK_SCIF2 10
#define R8A7790_CLK_SDHI3 11
#define R8A7790_CLK_SDHI2 12
#define R8A7790_CLK_SDHI1 13
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index f843de6bf377..4d3ecd626c1f 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -21,6 +21,7 @@
#define R8A7794_CLK_SDH 6
#define R8A7794_CLK_SD0 7
#define R8A7794_CLK_Z 8
+#define R8A7794_CLK_RCAN 9
/* MSTP0 */
#define R8A7794_CLK_MSIOF0 0
@@ -56,6 +57,8 @@
#define R8A7794_CLK_SDHI1 12
#define R8A7794_CLK_SDHI0 14
#define R8A7794_CLK_MMCIF0 15
+#define R8A7794_CLK_IIC0 18
+#define R8A7794_CLK_IIC1 23
#define R8A7794_CLK_CMT1 29
#define R8A7794_CLK_USBDMAC0 30
#define R8A7794_CLK_USBDMAC1 31
@@ -95,6 +98,8 @@
#define R8A7794_CLK_GPIO2 10
#define R8A7794_CLK_GPIO1 11
#define R8A7794_CLK_GPIO0 12
+#define R8A7794_CLK_RCAN1 15
+#define R8A7794_CLK_RCAN0 16
#define R8A7794_CLK_QSPI_MOD 17
#define R8A7794_CLK_I2C5 25
#define R8A7794_CLK_I2C4 27
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
new file mode 100644
index 000000000000..50a44cffb070
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng <zhengxing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H
+
+/* core clocks */
+#define PLL_APLLL 1
+#define PLL_APLLB 2
+#define PLL_DPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_NPLL 6
+#define PLL_VPLL 7
+#define ARMCLKL 8
+#define ARMCLKB 9
+
+/* sclk gates (special clocks) */
+#define SCLK_I2C1 65
+#define SCLK_I2C2 66
+#define SCLK_I2C3 67
+#define SCLK_I2C5 68
+#define SCLK_I2C6 69
+#define SCLK_I2C7 70
+#define SCLK_SPI0 71
+#define SCLK_SPI1 72
+#define SCLK_SPI2 73
+#define SCLK_SPI4 74
+#define SCLK_SPI5 75
+#define SCLK_SDMMC 76
+#define SCLK_SDIO 77
+#define SCLK_EMMC 78
+#define SCLK_TSADC 79
+#define SCLK_SARADC 80
+#define SCLK_UART0 81
+#define SCLK_UART1 82
+#define SCLK_UART2 83
+#define SCLK_UART3 84
+#define SCLK_SPDIF_8CH 85
+#define SCLK_I2S0_8CH 86
+#define SCLK_I2S1_8CH 87
+#define SCLK_I2S2_8CH 88
+#define SCLK_I2S_8CH_OUT 89
+#define SCLK_TIMER00 90
+#define SCLK_TIMER01 91
+#define SCLK_TIMER02 92
+#define SCLK_TIMER03 93
+#define SCLK_TIMER04 94
+#define SCLK_TIMER05 95
+#define SCLK_TIMER06 96
+#define SCLK_TIMER07 97
+#define SCLK_TIMER08 98
+#define SCLK_TIMER09 99
+#define SCLK_TIMER10 100
+#define SCLK_TIMER11 101
+#define SCLK_MACREF 102
+#define SCLK_MAC_RX 103
+#define SCLK_MAC_TX 104
+#define SCLK_MAC 105
+#define SCLK_MACREF_OUT 106
+#define SCLK_VOP0_PWM 107
+#define SCLK_VOP1_PWM 108
+#define SCLK_RGA_CORE 109
+#define SCLK_ISP0 110
+#define SCLK_ISP1 111
+#define SCLK_HDMI_CEC 112
+#define SCLK_HDMI_SFR 113
+#define SCLK_DP_CORE 114
+#define SCLK_PVTM_CORE_L 115
+#define SCLK_PVTM_CORE_B 116
+#define SCLK_PVTM_GPU 117
+#define SCLK_PVTM_DDR 118
+#define SCLK_MIPIDPHY_REF 119
+#define SCLK_MIPIDPHY_CFG 120
+#define SCLK_HSICPHY 121
+#define SCLK_USBPHY480M 122
+#define SCLK_USB2PHY0_REF 123
+#define SCLK_USB2PHY1_REF 124
+#define SCLK_UPHY0_TCPDPHY_REF 125
+#define SCLK_UPHY0_TCPDCORE 126
+#define SCLK_UPHY1_TCPDPHY_REF 127
+#define SCLK_UPHY1_TCPDCORE 128
+#define SCLK_USB3OTG0_REF 129
+#define SCLK_USB3OTG1_REF 130
+#define SCLK_USB3OTG0_SUSPEND 131
+#define SCLK_USB3OTG1_SUSPEND 132
+#define SCLK_CRYPTO0 133
+#define SCLK_CRYPTO1 134
+#define SCLK_CCI_TRACE 135
+#define SCLK_CS 136
+#define SCLK_CIF_OUT 137
+#define SCLK_PCIEPHY_REF 138
+#define SCLK_PCIE_CORE 139
+#define SCLK_M0_PERILP 140
+#define SCLK_M0_PERILP_DEC 141
+#define SCLK_CM0S 142
+#define SCLK_DBG_NOC 143
+#define SCLK_DBG_PD_CORE_B 144
+#define SCLK_DBG_PD_CORE_L 145
+#define SCLK_DFIMON0_TIMER 146
+#define SCLK_DFIMON1_TIMER 147
+#define SCLK_INTMEM0 148
+#define SCLK_INTMEM1 149
+#define SCLK_INTMEM2 150
+#define SCLK_INTMEM3 151
+#define SCLK_INTMEM4 152
+#define SCLK_INTMEM5 153
+#define SCLK_SDMMC_DRV 154
+#define SCLK_SDMMC_SAMPLE 155
+#define SCLK_SDIO_DRV 156
+#define SCLK_SDIO_SAMPLE 157
+#define SCLK_VDU_CORE 158
+#define SCLK_VDU_CA 159
+#define SCLK_PCIE_PM 160
+#define SCLK_SPDIF_REC_DPTX 161
+#define SCLK_DPHY_PLL 162
+#define SCLK_DPHY_TX0_CFG 163
+#define SCLK_DPHY_TX1RX1_CFG 164
+#define SCLK_DPHY_RX0_CFG 165
+#define SCLK_RMII_SRC 166
+#define SCLK_PCIEPHY_REF100M 167
+
+#define DCLK_VOP0 180
+#define DCLK_VOP1 181
+#define DCLK_VOP0_DIV 182
+#define DCLK_VOP1_DIV 183
+#define DCLK_M0_PERILP 184
+
+#define FCLK_CM0S 190
+
+/* aclk gates */
+#define ACLK_PERIHP 192
+#define ACLK_PERIHP_NOC 193
+#define ACLK_PERILP0 194
+#define ACLK_PERILP0_NOC 195
+#define ACLK_PERF_PCIE 196
+#define ACLK_PCIE 197
+#define ACLK_INTMEM 198
+#define ACLK_TZMA 199
+#define ACLK_DCF 200
+#define ACLK_CCI 201
+#define ACLK_CCI_NOC0 202
+#define ACLK_CCI_NOC1 203
+#define ACLK_CCI_GRF 204
+#define ACLK_CENTER 205
+#define ACLK_CENTER_MAIN_NOC 206
+#define ACLK_CENTER_PERI_NOC 207
+#define ACLK_GPU 208
+#define ACLK_PERF_GPU 209
+#define ACLK_GPU_GRF 210
+#define ACLK_DMAC0_PERILP 211
+#define ACLK_DMAC1_PERILP 212
+#define ACLK_GMAC 213
+#define ACLK_GMAC_NOC 214
+#define ACLK_PERF_GMAC 215
+#define ACLK_VOP0_NOC 216
+#define ACLK_VOP0 217
+#define ACLK_VOP1_NOC 218
+#define ACLK_VOP1 219
+#define ACLK_RGA 220
+#define ACLK_RGA_NOC 221
+#define ACLK_HDCP 222
+#define ACLK_HDCP_NOC 223
+#define ACLK_HDCP22 224
+#define ACLK_IEP 225
+#define ACLK_IEP_NOC 226
+#define ACLK_VIO 227
+#define ACLK_VIO_NOC 228
+#define ACLK_ISP0 229
+#define ACLK_ISP1 230
+#define ACLK_ISP0_NOC 231
+#define ACLK_ISP1_NOC 232
+#define ACLK_ISP0_WRAPPER 233
+#define ACLK_ISP1_WRAPPER 234
+#define ACLK_VCODEC 235
+#define ACLK_VCODEC_NOC 236
+#define ACLK_VDU 237
+#define ACLK_VDU_NOC 238
+#define ACLK_PERI 239
+#define ACLK_EMMC 240
+#define ACLK_EMMC_CORE 241
+#define ACLK_EMMC_NOC 242
+#define ACLK_EMMC_GRF 243
+#define ACLK_USB3 244
+#define ACLK_USB3_NOC 245
+#define ACLK_USB3OTG0 246
+#define ACLK_USB3OTG1 247
+#define ACLK_USB3_RKSOC_AXI_PERF 248
+#define ACLK_USB3_GRF 249
+#define ACLK_GIC 250
+#define ACLK_GIC_NOC 251
+#define ACLK_GIC_ADB400_CORE_L_2_GIC 252
+#define ACLK_GIC_ADB400_CORE_B_2_GIC 253
+#define ACLK_GIC_ADB400_GIC_2_CORE_L 254
+#define ACLK_GIC_ADB400_GIC_2_CORE_B 255
+#define ACLK_CORE_ADB400_CORE_L_2_CCI500 256
+#define ACLK_CORE_ADB400_CORE_B_2_CCI500 257
+#define ACLK_ADB400M_PD_CORE_L 258
+#define ACLK_ADB400M_PD_CORE_B 259
+#define ACLK_PERF_CORE_L 260
+#define ACLK_PERF_CORE_B 261
+#define ACLK_GIC_PRE 262
+#define ACLK_VOP0_PRE 263
+#define ACLK_VOP1_PRE 264
+
+/* pclk gates */
+#define PCLK_PERIHP 320
+#define PCLK_PERIHP_NOC 321
+#define PCLK_PERILP0 322
+#define PCLK_PERILP1 323
+#define PCLK_PERILP1_NOC 324
+#define PCLK_PERILP_SGRF 325
+#define PCLK_PERIHP_GRF 326
+#define PCLK_PCIE 327
+#define PCLK_SGRF 328
+#define PCLK_INTR_ARB 329
+#define PCLK_CENTER_MAIN_NOC 330
+#define PCLK_CIC 331
+#define PCLK_COREDBG_B 332
+#define PCLK_COREDBG_L 333
+#define PCLK_DBG_CXCS_PD_CORE_B 334
+#define PCLK_DCF 335
+#define PCLK_GPIO2 336
+#define PCLK_GPIO3 337
+#define PCLK_GPIO4 338
+#define PCLK_GRF 339
+#define PCLK_HSICPHY 340
+#define PCLK_I2C1 341
+#define PCLK_I2C2 342
+#define PCLK_I2C3 343
+#define PCLK_I2C5 344
+#define PCLK_I2C6 345
+#define PCLK_I2C7 346
+#define PCLK_SPI0 347
+#define PCLK_SPI1 348
+#define PCLK_SPI2 349
+#define PCLK_SPI4 350
+#define PCLK_SPI5 351
+#define PCLK_UART0 352
+#define PCLK_UART1 353
+#define PCLK_UART2 354
+#define PCLK_UART3 355
+#define PCLK_TSADC 356
+#define PCLK_SARADC 357
+#define PCLK_GMAC 358
+#define PCLK_GMAC_NOC 359
+#define PCLK_TIMER0 360
+#define PCLK_TIMER1 361
+#define PCLK_EDP 362
+#define PCLK_EDP_NOC 363
+#define PCLK_EDP_CTRL 364
+#define PCLK_VIO 365
+#define PCLK_VIO_NOC 366
+#define PCLK_VIO_GRF 367
+#define PCLK_MIPI_DSI0 368
+#define PCLK_MIPI_DSI1 369
+#define PCLK_HDCP 370
+#define PCLK_HDCP_NOC 371
+#define PCLK_HDMI_CTRL 372
+#define PCLK_DP_CTRL 373
+#define PCLK_HDCP22 374
+#define PCLK_GASKET 375
+#define PCLK_DDR 376
+#define PCLK_DDR_MON 377
+#define PCLK_DDR_SGRF 378
+#define PCLK_ISP1_WRAPPER 379
+#define PCLK_WDT 380
+#define PCLK_EFUSE1024NS 381
+#define PCLK_EFUSE1024S 382
+#define PCLK_PMU_INTR_ARB 383
+#define PCLK_MAILBOX0 384
+#define PCLK_USBPHY_MUX_G 385
+#define PCLK_UPHY0_TCPHY_G 386
+#define PCLK_UPHY0_TCPD_G 387
+#define PCLK_UPHY1_TCPHY_G 388
+#define PCLK_UPHY1_TCPD_G 389
+#define PCLK_ALIVE 390
+
+/* hclk gates */
+#define HCLK_PERIHP 448
+#define HCLK_PERILP0 449
+#define HCLK_PERILP1 450
+#define HCLK_PERILP0_NOC 451
+#define HCLK_PERILP1_NOC 452
+#define HCLK_M0_PERILP 453
+#define HCLK_M0_PERILP_NOC 454
+#define HCLK_AHB1TOM 455
+#define HCLK_HOST0 456
+#define HCLK_HOST0_ARB 457
+#define HCLK_HOST1 458
+#define HCLK_HOST1_ARB 459
+#define HCLK_HSIC 460
+#define HCLK_SD 461
+#define HCLK_SDMMC 462
+#define HCLK_SDMMC_NOC 463
+#define HCLK_M_CRYPTO0 464
+#define HCLK_M_CRYPTO1 465
+#define HCLK_S_CRYPTO0 466
+#define HCLK_S_CRYPTO1 467
+#define HCLK_I2S0_8CH 468
+#define HCLK_I2S1_8CH 469
+#define HCLK_I2S2_8CH 470
+#define HCLK_SPDIF 471
+#define HCLK_VOP0_NOC 472
+#define HCLK_VOP0 473
+#define HCLK_VOP1_NOC 474
+#define HCLK_VOP1 475
+#define HCLK_ROM 476
+#define HCLK_IEP 477
+#define HCLK_IEP_NOC 478
+#define HCLK_ISP0 479
+#define HCLK_ISP1 480
+#define HCLK_ISP0_NOC 481
+#define HCLK_ISP1_NOC 482
+#define HCLK_ISP0_WRAPPER 483
+#define HCLK_ISP1_WRAPPER 484
+#define HCLK_RGA 485
+#define HCLK_RGA_NOC 486
+#define HCLK_HDCP 487
+#define HCLK_HDCP_NOC 488
+#define HCLK_HDCP22 489
+#define HCLK_VCODEC 490
+#define HCLK_VCODEC_NOC 491
+#define HCLK_VDU 492
+#define HCLK_VDU_NOC 493
+#define HCLK_SDIO 494
+#define HCLK_SDIO_NOC 495
+#define HCLK_SDIOAUDIO_NOC 496
+
+#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
+
+/* pmu-clocks indices */
+
+#define PLL_PPLL 1
+
+#define SCLK_32K_SUSPEND_PMU 2
+#define SCLK_SPI3_PMU 3
+#define SCLK_TIMER12_PMU 4
+#define SCLK_TIMER13_PMU 5
+#define SCLK_UART4_PMU 6
+#define SCLK_PVTM_PMU 7
+#define SCLK_WIFI_PMU 8
+#define SCLK_I2C0_PMU 9
+#define SCLK_I2C4_PMU 10
+#define SCLK_I2C8_PMU 11
+
+#define PCLK_SRC_PMU 19
+#define PCLK_PMU 20
+#define PCLK_PMUGRF_PMU 21
+#define PCLK_INTMEM1_PMU 22
+#define PCLK_GPIO0_PMU 23
+#define PCLK_GPIO1_PMU 24
+#define PCLK_SGRF_PMU 25
+#define PCLK_NOC_PMU 26
+#define PCLK_I2C0_PMU 27
+#define PCLK_I2C4_PMU 28
+#define PCLK_I2C8_PMU 29
+#define PCLK_RKPWM_PMU 30
+#define PCLK_SPI3_PMU 31
+#define PCLK_TIMER_PMU 32
+#define PCLK_MAILBOX_PMU 33
+#define PCLK_UART4_PMU 34
+#define PCLK_WDT_M0_PMU 35
+
+#define FCLK_CM0S_SRC_PMU 44
+#define FCLK_CM0S_PMU 45
+#define SCLK_CM0S_PMU 46
+#define HCLK_CM0S_PMU 47
+#define DCLK_CM0S_PMU 48
+#define PCLK_INTR_ARB_PMU 49
+#define HCLK_NOC_PMU 50
+
+#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE_L0 0
+#define SRST_CORE_B0 1
+#define SRST_CORE_PO_L0 2
+#define SRST_CORE_PO_B0 3
+#define SRST_L2_L 4
+#define SRST_L2_B 5
+#define SRST_ADB_L 6
+#define SRST_ADB_B 7
+#define SRST_A_CCI 8
+#define SRST_A_CCIM0_NOC 9
+#define SRST_A_CCIM1_NOC 10
+#define SRST_DBG_NOC 11
+
+/* cru_softrst_con1 */
+#define SRST_CORE_L0_T 16
+#define SRST_CORE_L1 17
+#define SRST_CORE_L2 18
+#define SRST_CORE_L3 19
+#define SRST_CORE_PO_L0_T 20
+#define SRST_CORE_PO_L1 21
+#define SRST_CORE_PO_L2 22
+#define SRST_CORE_PO_L3 23
+#define SRST_A_ADB400_GIC2COREL 24
+#define SRST_A_ADB400_COREL2GIC 25
+#define SRST_P_DBG_L 26
+#define SRST_L2_L_T 28
+#define SRST_ADB_L_T 29
+#define SRST_A_RKPERF_L 30
+#define SRST_PVTM_CORE_L 31
+
+/* cru_softrst_con2 */
+#define SRST_CORE_B0_T 32
+#define SRST_CORE_B1 33
+#define SRST_CORE_PO_B0_T 36
+#define SRST_CORE_PO_B1 37
+#define SRST_A_ADB400_GIC2COREB 40
+#define SRST_A_ADB400_COREB2GIC 41
+#define SRST_P_DBG_B 42
+#define SRST_L2_B_T 43
+#define SRST_ADB_B_T 45
+#define SRST_A_RKPERF_B 46
+#define SRST_PVTM_CORE_B 47
+
+/* cru_softrst_con3 */
+#define SRST_A_CCI_T 50
+#define SRST_A_CCIM0_NOC_T 51
+#define SRST_A_CCIM1_NOC_T 52
+#define SRST_A_ADB400M_PD_CORE_B_T 53
+#define SRST_A_ADB400M_PD_CORE_L_T 54
+#define SRST_DBG_NOC_T 55
+#define SRST_DBG_CXCS 56
+#define SRST_CCI_TRACE 57
+#define SRST_P_CCI_GRF 58
+
+/* cru_softrst_con4 */
+#define SRST_A_CENTER_MAIN_NOC 64
+#define SRST_A_CENTER_PERI_NOC 65
+#define SRST_P_CENTER_MAIN 66
+#define SRST_P_DDRMON 67
+#define SRST_P_CIC 68
+#define SRST_P_CENTER_SGRF 69
+#define SRST_DDR0_MSCH 70
+#define SRST_DDRCFG0_MSCH 71
+#define SRST_DDR0 72
+#define SRST_DDRPHY0 73
+#define SRST_DDR1_MSCH 74
+#define SRST_DDRCFG1_MSCH 75
+#define SRST_DDR1 76
+#define SRST_DDRPHY1 77
+#define SRST_DDR_CIC 78
+#define SRST_PVTM_DDR 79
+
+/* cru_softrst_con5 */
+#define SRST_A_VCODEC_NOC 80
+#define SRST_A_VCODEC 81
+#define SRST_H_VCODEC_NOC 82
+#define SRST_H_VCODEC 83
+#define SRST_A_VDU_NOC 88
+#define SRST_A_VDU 89
+#define SRST_H_VDU_NOC 90
+#define SRST_H_VDU 91
+#define SRST_VDU_CORE 92
+#define SRST_VDU_CA 93
+
+/* cru_softrst_con6 */
+#define SRST_A_IEP_NOC 96
+#define SRST_A_VOP_IEP 97
+#define SRST_A_IEP 98
+#define SRST_H_IEP_NOC 99
+#define SRST_H_IEP 100
+#define SRST_A_RGA_NOC 102
+#define SRST_A_RGA 103
+#define SRST_H_RGA_NOC 104
+#define SRST_H_RGA 105
+#define SRST_RGA_CORE 106
+#define SRST_EMMC_NOC 108
+#define SRST_EMMC 109
+#define SRST_EMMC_GRF 110
+
+/* cru_softrst_con7 */
+#define SRST_A_PERIHP_NOC 112
+#define SRST_P_PERIHP_GRF 113
+#define SRST_H_PERIHP_NOC 114
+#define SRST_USBHOST0 115
+#define SRST_HOSTC0_AUX 116
+#define SRST_HOST0_ARB 117
+#define SRST_USBHOST1 118
+#define SRST_HOSTC1_AUX 119
+#define SRST_HOST1_ARB 120
+#define SRST_SDIO0 121
+#define SRST_SDMMC 122
+#define SRST_HSIC 123
+#define SRST_HSIC_AUX 124
+#define SRST_AHB1TOM 125
+#define SRST_P_PERIHP_NOC 126
+#define SRST_HSICPHY 127
+
+/* cru_softrst_con8 */
+#define SRST_A_PCIE 128
+#define SRST_P_PCIE 129
+#define SRST_PCIE_CORE 130
+#define SRST_PCIE_MGMT 131
+#define SRST_PCIE_MGMT_STICKY 132
+#define SRST_PCIE_PIPE 133
+#define SRST_PCIE_PM 134
+#define SRST_PCIEPHY 135
+#define SRST_A_GMAC_NOC 136
+#define SRST_A_GMAC 137
+#define SRST_P_GMAC_NOC 138
+#define SRST_P_GMAC_GRF 140
+#define SRST_HSICPHY_POR 142
+#define SRST_HSICPHY_UTMI 143
+
+/* cru_softrst_con9 */
+#define SRST_USB2PHY0_POR 144
+#define SRST_USB2PHY0_UTMI_PORT0 145
+#define SRST_USB2PHY0_UTMI_PORT1 146
+#define SRST_USB2PHY0_EHCIPHY 147
+#define SRST_UPHY0_PIPE_L00 148
+#define SRST_UPHY0 149
+#define SRST_UPHY0_TCPDPWRUP 150
+#define SRST_USB2PHY1_POR 152
+#define SRST_USB2PHY1_UTMI_PORT0 153
+#define SRST_USB2PHY1_UTMI_PORT1 154
+#define SRST_USB2PHY1_EHCIPHY 155
+#define SRST_UPHY1_PIPE_L00 156
+#define SRST_UPHY1 157
+#define SRST_UPHY1_TCPDPWRUP 158
+
+/* cru_softrst_con10 */
+#define SRST_A_PERILP0_NOC 160
+#define SRST_A_DCF 161
+#define SRST_GIC500 162
+#define SRST_DMAC0_PERILP0 163
+#define SRST_DMAC1_PERILP0 164
+#define SRST_TZMA 165
+#define SRST_INTMEM 166
+#define SRST_ADB400_MST0 167
+#define SRST_ADB400_MST1 168
+#define SRST_ADB400_SLV0 169
+#define SRST_ADB400_SLV1 170
+#define SRST_H_PERILP0 171
+#define SRST_H_PERILP0_NOC 172
+#define SRST_ROM 173
+#define SRST_CRYPTO_S 174
+#define SRST_CRYPTO_M 175
+
+/* cru_softrst_con11 */
+#define SRST_P_DCF 176
+#define SRST_CM0S_NOC 177
+#define SRST_CM0S 178
+#define SRST_CM0S_DBG 179
+#define SRST_CM0S_PO 180
+#define SRST_CRYPTO 181
+#define SRST_P_PERILP1_SGRF 182
+#define SRST_P_PERILP1_GRF 183
+#define SRST_CRYPTO1_S 184
+#define SRST_CRYPTO1_M 185
+#define SRST_CRYPTO1 186
+#define SRST_GIC_NOC 188
+#define SRST_SD_NOC 189
+#define SRST_SDIOAUDIO_BRG 190
+
+/* cru_softrst_con12 */
+#define SRST_H_PERILP1 192
+#define SRST_H_PERILP1_NOC 193
+#define SRST_H_I2S0_8CH 194
+#define SRST_H_I2S1_8CH 195
+#define SRST_H_I2S2_8CH 196
+#define SRST_H_SPDIF_8CH 197
+#define SRST_P_PERILP1_NOC 198
+#define SRST_P_EFUSE_1024 199
+#define SRST_P_EFUSE_1024S 200
+#define SRST_P_I2C0 201
+#define SRST_P_I2C1 202
+#define SRST_P_I2C2 203
+#define SRST_P_I2C3 204
+#define SRST_P_I2C4 205
+#define SRST_P_I2C5 206
+#define SRST_P_MAILBOX0 207
+
+/* cru_softrst_con13 */
+#define SRST_P_UART0 208
+#define SRST_P_UART1 209
+#define SRST_P_UART2 210
+#define SRST_P_UART3 211
+#define SRST_P_SARADC 212
+#define SRST_P_TSADC 213
+#define SRST_P_SPI0 214
+#define SRST_P_SPI1 215
+#define SRST_P_SPI2 216
+#define SRST_P_SPI3 217
+#define SRST_P_SPI4 218
+#define SRST_SPI0 219
+#define SRST_SPI1 220
+#define SRST_SPI2 221
+#define SRST_SPI3 222
+#define SRST_SPI4 223
+
+/* cru_softrst_con14 */
+#define SRST_I2S0_8CH 224
+#define SRST_I2S1_8CH 225
+#define SRST_I2S2_8CH 226
+#define SRST_SPDIF_8CH 227
+#define SRST_UART0 228
+#define SRST_UART1 229
+#define SRST_UART2 230
+#define SRST_UART3 231
+#define SRST_TSADC 232
+#define SRST_I2C0 233
+#define SRST_I2C1 234
+#define SRST_I2C2 235
+#define SRST_I2C3 236
+#define SRST_I2C4 237
+#define SRST_I2C5 238
+#define SRST_SDIOAUDIO_NOC 239
+
+/* cru_softrst_con15 */
+#define SRST_A_VIO_NOC 240
+#define SRST_A_HDCP_NOC 241
+#define SRST_A_HDCP 242
+#define SRST_H_HDCP_NOC 243
+#define SRST_H_HDCP 244
+#define SRST_P_HDCP_NOC 245
+#define SRST_P_HDCP 246
+#define SRST_P_HDMI_CTRL 247
+#define SRST_P_DP_CTRL 248
+#define SRST_S_DP_CTRL 249
+#define SRST_C_DP_CTRL 250
+#define SRST_P_MIPI_DSI0 251
+#define SRST_P_MIPI_DSI1 252
+#define SRST_DP_CORE 253
+#define SRST_DP_I2S 254
+
+/* cru_softrst_con16 */
+#define SRST_GASKET 256
+#define SRST_VIO_GRF 258
+#define SRST_DPTX_SPDIF_REC 259
+#define SRST_HDMI_CTRL 260
+#define SRST_HDCP_CTRL 261
+#define SRST_A_ISP0_NOC 262
+#define SRST_A_ISP1_NOC 263
+#define SRST_H_ISP0_NOC 266
+#define SRST_H_ISP1_NOC 267
+#define SRST_H_ISP0 268
+#define SRST_H_ISP1 269
+#define SRST_ISP0 270
+#define SRST_ISP1 271
+
+/* cru_softrst_con17 */
+#define SRST_A_VOP0_NOC 272
+#define SRST_A_VOP1_NOC 273
+#define SRST_A_VOP0 274
+#define SRST_A_VOP1 275
+#define SRST_H_VOP0_NOC 276
+#define SRST_H_VOP1_NOC 277
+#define SRST_H_VOP0 278
+#define SRST_H_VOP1 279
+#define SRST_D_VOP0 280
+#define SRST_D_VOP1 281
+#define SRST_VOP0_PWM 282
+#define SRST_VOP1_PWM 283
+#define SRST_P_EDP_NOC 284
+#define SRST_P_EDP_CTRL 285
+
+/* cru_softrst_con18 */
+#define SRST_A_GPU 288
+#define SRST_A_GPU_NOC 289
+#define SRST_A_GPU_GRF 290
+#define SRST_PVTM_GPU 291
+#define SRST_A_USB3_NOC 292
+#define SRST_A_USB3_OTG0 293
+#define SRST_A_USB3_OTG1 294
+#define SRST_A_USB3_GRF 295
+#define SRST_PMU 296
+
+/* cru_softrst_con19 */
+#define SRST_P_TIMER0_5 304
+#define SRST_TIMER0 305
+#define SRST_TIMER1 306
+#define SRST_TIMER2 307
+#define SRST_TIMER3 308
+#define SRST_TIMER4 309
+#define SRST_TIMER5 310
+#define SRST_P_TIMER6_11 311
+#define SRST_TIMER6 312
+#define SRST_TIMER7 313
+#define SRST_TIMER8 314
+#define SRST_TIMER9 315
+#define SRST_TIMER10 316
+#define SRST_TIMER11 317
+#define SRST_P_INTR_ARB_PMU 318
+#define SRST_P_ALIVE_SGRF 319
+
+/* cru_softrst_con20 */
+#define SRST_P_GPIO2 320
+#define SRST_P_GPIO3 321
+#define SRST_P_GPIO4 322
+#define SRST_P_GRF 323
+#define SRST_P_ALIVE_NOC 324
+#define SRST_P_WDT0 325
+#define SRST_P_WDT1 326
+#define SRST_P_INTR_ARB 327
+#define SRST_P_UPHY0_DPTX 328
+#define SRST_P_UPHY0_APB 330
+#define SRST_P_UPHY0_TCPHY 332
+#define SRST_P_UPHY1_TCPHY 333
+#define SRST_P_UPHY0_TCPDCTRL 334
+#define SRST_P_UPHY1_TCPDCTRL 335
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_P_NOC 0
+#define SRST_P_INTMEM 1
+#define SRST_H_CM0S 2
+#define SRST_H_CM0S_NOC 3
+#define SRST_DBG_CM0S 4
+#define SRST_PO_CM0S 5
+#define SRST_P_SPI6 6
+#define SRST_SPI6 7
+#define SRST_P_TIMER_0_1 8
+#define SRST_P_TIMER_0 9
+#define SRST_P_TIMER_1 10
+#define SRST_P_UART4 11
+#define SRST_UART4 12
+#define SRST_P_WDT 13
+
+/* pmu_cru_softrst_con1 */
+#define SRST_P_I2C6 16
+#define SRST_P_I2C7 17
+#define SRST_P_I2C8 18
+#define SRST_P_MAILBOX 19
+#define SRST_P_RKPWM 20
+#define SRST_P_PMUGRF 21
+#define SRST_P_SGRF 22
+#define SRST_P_GPIO0 23
+#define SRST_P_GPIO1 24
+#define SRST_P_CRU 25
+#define SRST_P_INTR 26
+#define SRST_PVTM 27
+#define SRST_I2C6 28
+#define SRST_I2C7 29
+#define SRST_I2C8 30
+
+#endif
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 0a05b0d36ae7..bd3530e56d46 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -346,7 +346,7 @@
#define TEGRA210_CLK_PLL_P_OUT_HSIO 316
#define TEGRA210_CLK_PLL_P_OUT_XUSB 317
#define TEGRA210_CLK_XUSB_SSP_SRC 318
-/* 319 */
+#define TEGRA210_CLK_PLL_RE_OUT1 319
/* 320 */
/* 321 */
/* 322 */
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 56c16aaea112..45997750c8a0 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -194,7 +194,11 @@
#define VF610_PLL7_BYPASS 181
#define VF610_CLK_SNVS 182
#define VF610_CLK_DAP 183
-#define VF610_CLK_OCOTP 184
-#define VF610_CLK_END 185
+#define VF610_CLK_OCOTP 184
+#define VF610_CLK_DDRMC 185
+#define VF610_CLK_WKPU 186
+#define VF610_CLK_TCON0 187
+#define VF610_CLK_TCON1 188
+#define VF610_CLK_END 189
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h
new file mode 100644
index 000000000000..58654fd7aa1e
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h
@@ -0,0 +1,154 @@
+/*
+ * GPIO definitions for Amlogic Meson GXBB SoCs
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H
+#define _DT_BINDINGS_MESON_GXBB_GPIO_H
+
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define GPIOZ_11 11
+#define GPIOZ_12 12
+#define GPIOZ_13 13
+#define GPIOZ_14 14
+#define GPIOZ_15 15
+#define GPIOH_0 16
+#define GPIOH_1 17
+#define GPIOH_2 18
+#define GPIOH_3 19
+#define BOOT_0 20
+#define BOOT_1 21
+#define BOOT_2 22
+#define BOOT_3 23
+#define BOOT_4 24
+#define BOOT_5 25
+#define BOOT_6 26
+#define BOOT_7 27
+#define BOOT_8 28
+#define BOOT_9 29
+#define BOOT_10 30
+#define BOOT_11 31
+#define BOOT_12 32
+#define BOOT_13 33
+#define BOOT_14 34
+#define BOOT_15 35
+#define BOOT_16 36
+#define BOOT_17 37
+#define CARD_0 38
+#define CARD_1 39
+#define CARD_2 40
+#define CARD_3 41
+#define CARD_4 42
+#define CARD_5 43
+#define CARD_6 44
+#define GPIODV_0 45
+#define GPIODV_1 46
+#define GPIODV_2 47
+#define GPIODV_3 48
+#define GPIODV_4 49
+#define GPIODV_5 50
+#define GPIODV_6 51
+#define GPIODV_7 52
+#define GPIODV_8 53
+#define GPIODV_9 54
+#define GPIODV_10 55
+#define GPIODV_11 56
+#define GPIODV_12 57
+#define GPIODV_13 58
+#define GPIODV_14 59
+#define GPIODV_15 60
+#define GPIODV_16 61
+#define GPIODV_17 62
+#define GPIODV_18 63
+#define GPIODV_19 64
+#define GPIODV_20 65
+#define GPIODV_21 66
+#define GPIODV_22 67
+#define GPIODV_23 68
+#define GPIODV_24 69
+#define GPIODV_25 70
+#define GPIODV_26 71
+#define GPIODV_27 72
+#define GPIODV_28 73
+#define GPIODV_29 74
+#define GPIOY_0 75
+#define GPIOY_1 76
+#define GPIOY_2 77
+#define GPIOY_3 78
+#define GPIOY_4 79
+#define GPIOY_5 80
+#define GPIOY_6 81
+#define GPIOY_7 82
+#define GPIOY_8 83
+#define GPIOY_9 84
+#define GPIOY_10 85
+#define GPIOY_11 86
+#define GPIOY_12 87
+#define GPIOY_13 88
+#define GPIOY_14 89
+#define GPIOY_15 90
+#define GPIOY_16 91
+#define GPIOX_0 92
+#define GPIOX_1 93
+#define GPIOX_2 94
+#define GPIOX_3 95
+#define GPIOX_4 96
+#define GPIOX_5 97
+#define GPIOX_6 98
+#define GPIOX_7 99
+#define GPIOX_8 100
+#define GPIOX_9 101
+#define GPIOX_10 102
+#define GPIOX_11 103
+#define GPIOX_12 104
+#define GPIOX_13 105
+#define GPIOX_14 106
+#define GPIOX_15 107
+#define GPIOX_16 108
+#define GPIOX_17 109
+#define GPIOX_18 110
+#define GPIOX_19 111
+#define GPIOX_20 112
+#define GPIOX_21 113
+#define GPIOX_22 114
+#define GPIOCLK_0 115
+#define GPIOCLK_1 116
+#define GPIOCLK_2 117
+#define GPIOCLK_3 118
+#define GPIO_TEST_N 119
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h
index 197dc28b676e..a1c09e88e80b 100644
--- a/include/dt-bindings/gpio/tegra-gpio.h
+++ b/include/dt-bindings/gpio/tegra-gpio.h
@@ -12,40 +12,40 @@
#include <dt-bindings/gpio/gpio.h>
-#define TEGRA_GPIO_BANK_ID_A 0
-#define TEGRA_GPIO_BANK_ID_B 1
-#define TEGRA_GPIO_BANK_ID_C 2
-#define TEGRA_GPIO_BANK_ID_D 3
-#define TEGRA_GPIO_BANK_ID_E 4
-#define TEGRA_GPIO_BANK_ID_F 5
-#define TEGRA_GPIO_BANK_ID_G 6
-#define TEGRA_GPIO_BANK_ID_H 7
-#define TEGRA_GPIO_BANK_ID_I 8
-#define TEGRA_GPIO_BANK_ID_J 9
-#define TEGRA_GPIO_BANK_ID_K 10
-#define TEGRA_GPIO_BANK_ID_L 11
-#define TEGRA_GPIO_BANK_ID_M 12
-#define TEGRA_GPIO_BANK_ID_N 13
-#define TEGRA_GPIO_BANK_ID_O 14
-#define TEGRA_GPIO_BANK_ID_P 15
-#define TEGRA_GPIO_BANK_ID_Q 16
-#define TEGRA_GPIO_BANK_ID_R 17
-#define TEGRA_GPIO_BANK_ID_S 18
-#define TEGRA_GPIO_BANK_ID_T 19
-#define TEGRA_GPIO_BANK_ID_U 20
-#define TEGRA_GPIO_BANK_ID_V 21
-#define TEGRA_GPIO_BANK_ID_W 22
-#define TEGRA_GPIO_BANK_ID_X 23
-#define TEGRA_GPIO_BANK_ID_Y 24
-#define TEGRA_GPIO_BANK_ID_Z 25
-#define TEGRA_GPIO_BANK_ID_AA 26
-#define TEGRA_GPIO_BANK_ID_BB 27
-#define TEGRA_GPIO_BANK_ID_CC 28
-#define TEGRA_GPIO_BANK_ID_DD 29
-#define TEGRA_GPIO_BANK_ID_EE 30
-#define TEGRA_GPIO_BANK_ID_FF 31
+#define TEGRA_GPIO_PORT_A 0
+#define TEGRA_GPIO_PORT_B 1
+#define TEGRA_GPIO_PORT_C 2
+#define TEGRA_GPIO_PORT_D 3
+#define TEGRA_GPIO_PORT_E 4
+#define TEGRA_GPIO_PORT_F 5
+#define TEGRA_GPIO_PORT_G 6
+#define TEGRA_GPIO_PORT_H 7
+#define TEGRA_GPIO_PORT_I 8
+#define TEGRA_GPIO_PORT_J 9
+#define TEGRA_GPIO_PORT_K 10
+#define TEGRA_GPIO_PORT_L 11
+#define TEGRA_GPIO_PORT_M 12
+#define TEGRA_GPIO_PORT_N 13
+#define TEGRA_GPIO_PORT_O 14
+#define TEGRA_GPIO_PORT_P 15
+#define TEGRA_GPIO_PORT_Q 16
+#define TEGRA_GPIO_PORT_R 17
+#define TEGRA_GPIO_PORT_S 18
+#define TEGRA_GPIO_PORT_T 19
+#define TEGRA_GPIO_PORT_U 20
+#define TEGRA_GPIO_PORT_V 21
+#define TEGRA_GPIO_PORT_W 22
+#define TEGRA_GPIO_PORT_X 23
+#define TEGRA_GPIO_PORT_Y 24
+#define TEGRA_GPIO_PORT_Z 25
+#define TEGRA_GPIO_PORT_AA 26
+#define TEGRA_GPIO_PORT_BB 27
+#define TEGRA_GPIO_PORT_CC 28
+#define TEGRA_GPIO_PORT_DD 29
+#define TEGRA_GPIO_PORT_EE 30
+#define TEGRA_GPIO_PORT_FF 31
-#define TEGRA_GPIO(bank, offset) \
- ((TEGRA_GPIO_BANK_ID_##bank * 8) + offset)
+#define TEGRA_GPIO(port, offset) \
+ ((TEGRA_GPIO_PORT_##port * 8) + offset)
#endif
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
new file mode 100644
index 000000000000..38001c7023f1
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -0,0 +1,56 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA_MAIN_GPIO_PORT_A 0
+#define TEGRA_MAIN_GPIO_PORT_B 1
+#define TEGRA_MAIN_GPIO_PORT_C 2
+#define TEGRA_MAIN_GPIO_PORT_D 3
+#define TEGRA_MAIN_GPIO_PORT_E 4
+#define TEGRA_MAIN_GPIO_PORT_F 5
+#define TEGRA_MAIN_GPIO_PORT_G 6
+#define TEGRA_MAIN_GPIO_PORT_H 7
+#define TEGRA_MAIN_GPIO_PORT_I 8
+#define TEGRA_MAIN_GPIO_PORT_J 9
+#define TEGRA_MAIN_GPIO_PORT_K 10
+#define TEGRA_MAIN_GPIO_PORT_L 11
+#define TEGRA_MAIN_GPIO_PORT_M 12
+#define TEGRA_MAIN_GPIO_PORT_N 13
+#define TEGRA_MAIN_GPIO_PORT_O 14
+#define TEGRA_MAIN_GPIO_PORT_P 15
+#define TEGRA_MAIN_GPIO_PORT_Q 16
+#define TEGRA_MAIN_GPIO_PORT_R 17
+#define TEGRA_MAIN_GPIO_PORT_T 18
+#define TEGRA_MAIN_GPIO_PORT_X 19
+#define TEGRA_MAIN_GPIO_PORT_Y 20
+#define TEGRA_MAIN_GPIO_PORT_BB 21
+#define TEGRA_MAIN_GPIO_PORT_CC 22
+
+#define TEGRA_MAIN_GPIO(port, offset) \
+ ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA_AON_GPIO_PORT_S 0
+#define TEGRA_AON_GPIO_PORT_U 1
+#define TEGRA_AON_GPIO_PORT_V 2
+#define TEGRA_AON_GPIO_PORT_W 3
+#define TEGRA_AON_GPIO_PORT_Z 4
+#define TEGRA_AON_GPIO_PORT_AA 5
+#define TEGRA_AON_GPIO_PORT_EE 6
+#define TEGRA_AON_GPIO_PORT_FF 7
+
+#define TEGRA_AON_GPIO(port, offset) \
+ ((TEGRA_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
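A short sketch of the arithmetic behind the two helpers above: the port macros are plain indices, so the resulting GPIO ID is the port index times eight plus the line offset within the port. The assertions below only restate what the macros compute and are not part of the patch:

    #include <dt-bindings/gpio/tegra186-gpio.h>

    /* Main port A is index 0, main port J is index 9, AON port FF is index 7. */
    _Static_assert(TEGRA_MAIN_GPIO(A, 0) == 0,  "first line of main port A");
    _Static_assert(TEGRA_MAIN_GPIO(J, 2) == 74, "9 * 8 + 2");
    _Static_assert(TEGRA_AON_GPIO(FF, 1) == 57, "7 * 8 + 1");

In a device tree node the first specifier cell carries one of these values and the second cell a flag from gpio.h, as the header comment describes.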
diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h
new file mode 100644
index 000000000000..c48aca1dcade
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad5592r.h
@@ -0,0 +1,16 @@
+
+#ifndef _DT_BINDINGS_ADI_AD5592R_H
+#define _DT_BINDINGS_ADI_AD5592R_H
+
+#define CH_MODE_UNUSED 0
+#define CH_MODE_ADC 1
+#define CH_MODE_DAC 2
+#define CH_MODE_DAC_AND_ADC 3
+#define CH_MODE_GPIO 8
+
+#define CH_OFFSTATE_PULLDOWN 0
+#define CH_OFFSTATE_OUT_LOW 1
+#define CH_OFFSTATE_OUT_HIGH 2
+#define CH_OFFSTATE_OUT_TRISTATE 3
+
+#endif /* _DT_BINDINGS_ADI_AD5592R_H */
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index c40f665e2712..dedf46ffdb53 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -110,4 +110,9 @@
#define ARIZONA_ACCDET_MODE_HPM 4
#define ARIZONA_ACCDET_MODE_ADC 7
+#define ARIZONA_GPSW_OPEN 0
+#define ARIZONA_GPSW_CLOSED 1
+#define ARIZONA_GPSW_CLAMP_ENABLED 2
+#define ARIZONA_GPSW_CLAMP_DISABLED 3
+
#endif
diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h
new file mode 100644
index 000000000000..b911a0720ccd
--- /dev/null
+++ b/include/dt-bindings/mfd/max77620.h
@@ -0,0 +1,39 @@
+/*
+ * This header provides macros for MAXIM MAX77620 device bindings.
+ *
+ * Copyright (c) 2016, NVIDIA Corporation.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ */
+
+#ifndef _DT_BINDINGS_MFD_MAX77620_H
+#define _DT_BINDINGS_MFD_MAX77620_H
+
+/* MAX77620 interrupts */
+#define MAX77620_IRQ_TOP_GLBL 0 /* Low-Battery */
+#define MAX77620_IRQ_TOP_SD 1 /* SD power fail */
+#define MAX77620_IRQ_TOP_LDO 2 /* LDO power fail */
+#define MAX77620_IRQ_TOP_GPIO 3 /* GPIO internal int to MAX77620 */
+#define MAX77620_IRQ_TOP_RTC 4 /* RTC */
+#define MAX77620_IRQ_TOP_32K 5 /* 32kHz oscillator */
+#define MAX77620_IRQ_TOP_ONOFF 6 /* ON/OFF oscillator */
+#define MAX77620_IRQ_LBT_MBATLOW 7 /* Main battery low alarm */
+#define MAX77620_IRQ_LBT_TJALRM1 8 /* Thermal alarm status, > 120C */
+#define MAX77620_IRQ_LBT_TJALRM2 9 /* Thermal alarm status, > 140C */
+
+/* FPS event source */
+#define MAX77620_FPS_EVENT_SRC_EN0 0
+#define MAX77620_FPS_EVENT_SRC_EN1 1
+#define MAX77620_FPS_EVENT_SRC_SW 2
+
+/* Device state when FPS event LOW */
+#define MAX77620_FPS_INACTIVE_STATE_SLEEP 0
+#define MAX77620_FPS_INACTIVE_STATE_LOW_POWER 1
+
+/* FPS source */
+#define MAX77620_FPS_SRC_0 0
+#define MAX77620_FPS_SRC_1 1
+#define MAX77620_FPS_SRC_2 2
+#define MAX77620_FPS_SRC_NONE 3
+#define MAX77620_FPS_SRC_DEF 4
+
+#endif
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
new file mode 100644
index 000000000000..38f1ea879ea1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -0,0 +1,59 @@
+/*
+ * This header provides constants for hisilicon pinctrl bindings.
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_HISI_H
+#define _DT_BINDINGS_PINCTRL_HISI_H
+
+/* iomg bit definition */
+#define MUX_M0 0
+#define MUX_M1 1
+#define MUX_M2 2
+#define MUX_M3 3
+#define MUX_M4 4
+#define MUX_M5 5
+#define MUX_M6 6
+#define MUX_M7 7
+
+/* iocg bit definition */
+#define PULL_MASK (3)
+#define PULL_DIS (0)
+#define PULL_UP (1 << 0)
+#define PULL_DOWN (1 << 1)
+
+/* drive strength definition */
+#define DRIVE_MASK (7 << 4)
+#define DRIVE1_02MA (0 << 4)
+#define DRIVE1_04MA (1 << 4)
+#define DRIVE1_08MA (2 << 4)
+#define DRIVE1_10MA (3 << 4)
+#define DRIVE2_02MA (0 << 4)
+#define DRIVE2_04MA (1 << 4)
+#define DRIVE2_08MA (2 << 4)
+#define DRIVE2_10MA (3 << 4)
+#define DRIVE3_04MA (0 << 4)
+#define DRIVE3_08MA (1 << 4)
+#define DRIVE3_12MA (2 << 4)
+#define DRIVE3_16MA (3 << 4)
+#define DRIVE3_20MA (4 << 4)
+#define DRIVE3_24MA (5 << 4)
+#define DRIVE3_32MA (6 << 4)
+#define DRIVE3_40MA (7 << 4)
+#define DRIVE4_02MA (0 << 4)
+#define DRIVE4_04MA (2 << 4)
+#define DRIVE4_08MA (4 << 4)
+#define DRIVE4_10MA (6 << 4)
+
+#endif
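The mask macros above imply a single per-pin configuration word: the pull setting occupies bits [1:0] and the drive-strength selector bits [6:4], while the MUX_M* values go into the separate iomg (mux) register. A hedged sketch of composing and checking such a word; the combined value below is illustrative, not something the patch defines:

    #include <dt-bindings/pinctrl/hisi.h>

    /* Example configuration: pull-up enabled, 16 mA from the DRIVE3_* group. */
    #define EXAMPLE_IOCG_VAL (PULL_UP | DRIVE3_16MA)

    _Static_assert((EXAMPLE_IOCG_VAL & PULL_MASK)  == PULL_UP,     "pull field");
    _Static_assert((EXAMPLE_IOCG_VAL & DRIVE_MASK) == DRIVE3_16MA, "drive field");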
diff --git a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h
new file mode 100644
index 000000000000..183571da507e
--- /dev/null
+++ b/include/dt-bindings/power/r8a7779-sysc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7779_PD_ARM1 1
+#define R8A7779_PD_ARM2 2
+#define R8A7779_PD_ARM3 3
+#define R8A7779_PD_SGX 20
+#define R8A7779_PD_VDP 21
+#define R8A7779_PD_IMP 24
+
+/* Always-on power area */
+#define R8A7779_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7779_SYSC_H__ */
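Per the comment above, each index doubles as the bit position of its power area in the SYSC interrupt registers. A rough sketch of that mapping against a raw 32-bit SYSCISR value; the helper is illustrative only, and indices of 32 and up (the always-on area) have no corresponding register bit:

    #include <stdbool.h>
    #include <stdint.h>
    #include <dt-bindings/power/r8a7779-sysc.h>

    static bool r8a7779_area_isr_pending(uint32_t syscisr, unsigned int pd)
    {
            /* e.g. pd == R8A7779_PD_SGX tests bit 20 of SYSCISR */
            return pd < 32 && (syscisr & (1u << pd));
    }

The same convention is repeated in the r8a7790, r8a7791, r8a7793, r8a7794 and r8a7795 headers that follow.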
diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h
new file mode 100644
index 000000000000..6af4e9929bd0
--- /dev/null
+++ b/include/dt-bindings/power/r8a7790-sysc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7790_PD_CA15_CPU0 0
+#define R8A7790_PD_CA15_CPU1 1
+#define R8A7790_PD_CA15_CPU2 2
+#define R8A7790_PD_CA15_CPU3 3
+#define R8A7790_PD_CA7_CPU0 5
+#define R8A7790_PD_CA7_CPU1 6
+#define R8A7790_PD_CA7_CPU2 7
+#define R8A7790_PD_CA7_CPU3 8
+#define R8A7790_PD_CA15_SCU 12
+#define R8A7790_PD_SH_4A 16
+#define R8A7790_PD_RGX 20
+#define R8A7790_PD_CA7_SCU 21
+#define R8A7790_PD_IMP 24
+
+/* Always-on power area */
+#define R8A7790_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7790_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h
new file mode 100644
index 000000000000..1403baa0514f
--- /dev/null
+++ b/include/dt-bindings/power/r8a7791-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7791_PD_CA15_CPU0 0
+#define R8A7791_PD_CA15_CPU1 1
+#define R8A7791_PD_CA15_SCU 12
+#define R8A7791_PD_SH_4A 16
+#define R8A7791_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7791_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7791_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h
new file mode 100644
index 000000000000..b5693df3d830
--- /dev/null
+++ b/include/dt-bindings/power/r8a7793-sysc.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ *
+ * Note that R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
+ */
+
+#define R8A7793_PD_CA15_CPU0 0
+#define R8A7793_PD_CA15_CPU1 1
+#define R8A7793_PD_CA15_SCU 12
+#define R8A7793_PD_SH_4A 16
+#define R8A7793_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7793_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7793_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h
new file mode 100644
index 000000000000..862241c2d27b
--- /dev/null
+++ b/include/dt-bindings/power/r8a7794-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7794_PD_CA7_CPU0 5
+#define R8A7794_PD_CA7_CPU1 6
+#define R8A7794_PD_SH_4A 16
+#define R8A7794_PD_SGX 20
+#define R8A7794_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7794_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7794_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
new file mode 100644
index 000000000000..ee2e26ba605e
--- /dev/null
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7795_PD_CA57_CPU0 0
+#define R8A7795_PD_CA57_CPU1 1
+#define R8A7795_PD_CA57_CPU2 2
+#define R8A7795_PD_CA57_CPU3 3
+#define R8A7795_PD_CA53_CPU0 5
+#define R8A7795_PD_CA53_CPU1 6
+#define R8A7795_PD_CA53_CPU2 7
+#define R8A7795_PD_CA53_CPU3 8
+#define R8A7795_PD_A3VP 9
+#define R8A7795_PD_CA57_SCU 12
+#define R8A7795_PD_CR7 13
+#define R8A7795_PD_A3VC 14
+#define R8A7795_PD_3DG_A 17
+#define R8A7795_PD_3DG_B 18
+#define R8A7795_PD_3DG_C 19
+#define R8A7795_PD_3DG_D 20
+#define R8A7795_PD_CA53_SCU 21
+#define R8A7795_PD_3DG_E 22
+#define R8A7795_PD_A3IR 24
+#define R8A7795_PD_A2VC0 25
+#define R8A7795_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A7795_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7795_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h
new file mode 100644
index 000000000000..168b3bfbd6f5
--- /dev/null
+++ b/include/dt-bindings/power/rk3399-power.h
@@ -0,0 +1,53 @@
+#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__
+#define __DT_BINDINGS_POWER_RK3399_POWER_H__
+
+/* VD_CORE_L */
+#define RK3399_PD_A53_L0 0
+#define RK3399_PD_A53_L1 1
+#define RK3399_PD_A53_L2 2
+#define RK3399_PD_A53_L3 3
+#define RK3399_PD_SCU_L 4
+
+/* VD_CORE_B */
+#define RK3399_PD_A72_B0 5
+#define RK3399_PD_A72_B1 6
+#define RK3399_PD_SCU_B 7
+
+/* VD_LOGIC */
+#define RK3399_PD_TCPD0 8
+#define RK3399_PD_TCPD1 9
+#define RK3399_PD_CCI 10
+#define RK3399_PD_CCI0 11
+#define RK3399_PD_CCI1 12
+#define RK3399_PD_PERILP 13
+#define RK3399_PD_PERIHP 14
+#define RK3399_PD_VIO 15
+#define RK3399_PD_VO 16
+#define RK3399_PD_VOPB 17
+#define RK3399_PD_VOPL 18
+#define RK3399_PD_ISP0 19
+#define RK3399_PD_ISP1 20
+#define RK3399_PD_HDCP 21
+#define RK3399_PD_GMAC 22
+#define RK3399_PD_EMMC 23
+#define RK3399_PD_USB3 24
+#define RK3399_PD_EDP 25
+#define RK3399_PD_GIC 26
+#define RK3399_PD_SD 27
+#define RK3399_PD_SDIOAUDIO 28
+#define RK3399_PD_ALIVE 29
+
+/* VD_CENTER */
+#define RK3399_PD_CENTER 30
+#define RK3399_PD_VCODEC 31
+#define RK3399_PD_VDU 32
+#define RK3399_PD_RGA 33
+#define RK3399_PD_IEP 34
+
+/* VD_GPU */
+#define RK3399_PD_GPU 35
+
+/* VD_PMU */
+#define RK3399_PD_PMU 36
+
+#endif
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
index 85aaf66690f9..729ab9fc325e 100644
--- a/include/dt-bindings/thermal/tegra124-soctherm.h
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -9,5 +9,6 @@
#define TEGRA124_SOCTHERM_SENSOR_MEM 1
#define TEGRA124_SOCTHERM_SENSOR_GPU 2
#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+#define TEGRA124_SOCTHERM_SENSOR_NUM 4
#endif
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 4915d40d3c3c..2480469ce8fb 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -32,7 +32,7 @@ struct asymmetric_key_subtype {
void (*describe)(const struct key *key, struct seq_file *m);
/* Destroy a key of this subtype */
- void (*destroy)(void *payload);
+ void (*destroy)(void *payload_crypto, void *payload_auth);
/* Verify the signature on a key of this subtype (optional) */
int (*verify_signature)(const struct key *key,
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 59c1df9cf922..b38240716d41 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -15,6 +15,7 @@
#define _KEYS_ASYMMETRIC_TYPE_H
#include <linux/key-type.h>
+#include <linux/verification.h>
extern struct key_type key_type_asymmetric;
@@ -23,9 +24,10 @@ extern struct key_type key_type_asymmetric;
* follows:
*/
enum asymmetric_payload_bits {
- asym_crypto,
- asym_subtype,
- asym_key_ids,
+ asym_crypto, /* The data representing the key */
+ asym_subtype, /* Pointer to an asymmetric_key_subtype struct */
+ asym_key_ids, /* Pointer to an asymmetric_key_ids struct */
+ asym_auth /* The key's authorisation (signature, parent key ID) */
};
/*
@@ -74,6 +76,11 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
return key->payload.data[asym_key_ids];
}
+extern struct key *find_asymmetric_key(struct key *keyring,
+ const struct asymmetric_key_id *id_0,
+ const struct asymmetric_key_id *id_1,
+ bool partial);
+
/*
* The payload is at the discretion of the subtype.
*/
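The new asym_auth slot is read the same way as the other payload slots, mirroring the asymmetric_key_ids() accessor earlier in this header. The helper below is a hypothetical sketch for illustration, not an accessor the patch adds:

    #include <keys/asymmetric-type.h>

    /* Hypothetical accessor, mirroring asymmetric_key_ids() above. */
    static inline const void *asymmetric_key_auth(const struct key *key)
    {
            return key->payload.data[asym_auth];
    }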
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 39fd38cfa8c9..fbd4647767e9 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -12,51 +12,40 @@
#ifndef _KEYS_SYSTEM_KEYRING_H
#define _KEYS_SYSTEM_KEYRING_H
+#include <linux/key.h>
+
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
-#include <linux/key.h>
-#include <crypto/public_key.h>
+extern int restrict_link_by_builtin_trusted(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
-extern struct key *system_trusted_keyring;
-static inline struct key *get_system_trusted_keyring(void)
-{
- return system_trusted_keyring;
-}
#else
-static inline struct key *get_system_trusted_keyring(void)
-{
- return NULL;
-}
+#define restrict_link_by_builtin_trusted restrict_link_reject
#endif
-#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
-extern int system_verify_data(const void *data, unsigned long len,
- const void *raw_pkcs7, size_t pkcs7_len,
- enum key_being_used_for usage);
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+extern int restrict_link_by_builtin_and_secondary_trusted(
+ struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+#else
+#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
#endif
-#ifdef CONFIG_IMA_MOK_KEYRING
-extern struct key *ima_mok_keyring;
+#ifdef CONFIG_IMA_BLACKLIST_KEYRING
extern struct key *ima_blacklist_keyring;
-static inline struct key *get_ima_mok_keyring(void)
-{
- return ima_mok_keyring;
-}
static inline struct key *get_ima_blacklist_keyring(void)
{
return ima_blacklist_keyring;
}
#else
-static inline struct key *get_ima_mok_keyring(void)
-{
- return NULL;
-}
static inline struct key *get_ima_blacklist_keyring(void)
{
return NULL;
}
-#endif /* CONFIG_IMA_MOK_KEYRING */
+#endif /* CONFIG_IMA_BLACKLIST_KEYRING */
#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b651aed9dc6b..dda39d8fa189 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,9 +24,6 @@
#include <linux/workqueue.h>
struct arch_timer_kvm {
- /* Is the timer enabled */
- bool enabled;
-
/* Virtual offset */
cycle_t cntvoff;
};
@@ -53,15 +50,15 @@ struct arch_timer_cpu {
/* Timer IRQ */
struct kvm_irq_level irq;
- /* VGIC mapping */
- struct irq_phys_map *map;
-
/* Active IRQ state caching */
bool active_cleared_last;
+
+ /* Is the timer enabled */
+ bool enabled;
};
int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_init(struct kvm *kvm);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 281caf847fad..da0a524802cb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -19,12 +19,17 @@
#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H
+#ifdef CONFIG_KVM_NEW_VGIC
+#include <kvm/vgic/vgic.h>
+#else
+
#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>
+#include <linux/irqchip/arm-gic-common.h>
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -157,7 +162,6 @@ struct vgic_io_device {
struct irq_phys_map {
u32 virt_irq;
u32 phys_irq;
- u32 irq;
};
struct irq_phys_map_entry {
@@ -304,9 +308,6 @@ struct vgic_cpu {
unsigned long *active_shared;
unsigned long *pend_act_shared;
- /* Number of list registers on this CPU */
- int nr_lr;
-
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
@@ -341,27 +342,28 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map, bool level);
+ unsigned int virt_irq, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
- int virt_irq, int irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
+ ((i) < (k)->arch.vgic.nr_irqs))
-int vgic_v2_probe(struct device_node *vgic_node,
+int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params);
#ifdef CONFIG_KVM_ARM_VGIC_V3
-int vgic_v3_probe(struct device_node *vgic_node,
+int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params);
#else
-static inline int vgic_v3_probe(struct device_node *vgic_node,
+static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params)
{
@@ -369,4 +371,5 @@ static inline int vgic_v3_probe(struct device_node *vgic_node,
}
#endif
+#endif /* old VGIC include */
#endif
diff --git a/include/kvm/vgic/vgic.h b/include/kvm/vgic/vgic.h
new file mode 100644
index 000000000000..3fbd175265ae
--- /dev/null
+++ b/include/kvm/vgic/vgic.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_KVM_VGIC_VGIC_H
+#define __ASM_ARM_KVM_VGIC_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <kvm/iodev.h>
+
+#define VGIC_V3_MAX_CPUS 255
+#define VGIC_V2_MAX_CPUS 8
+#define VGIC_NR_IRQS_LEGACY 256
+#define VGIC_NR_SGIS 16
+#define VGIC_NR_PPIS 16
+#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
+#define VGIC_MAX_SPI 1019
+#define VGIC_MAX_RESERVED 1023
+#define VGIC_MIN_LPI 8192
+
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* New fancy GICv3 */
+};
+
+/* Shared by all guests, as it depends only on the _host's_ GIC model */
+struct vgic_global {
+ /* type of the host GIC */
+ enum vgic_type type;
+
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
+
+ /* virtual control interface mapping */
+ void __iomem *vctrl_base;
+
+ /* Number of implemented list registers */
+ int nr_lr;
+
+ /* Maintenance IRQ number */
+ unsigned int maint_irq;
+
+ /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
+ int max_gic_vcpus;
+
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
+};
+
+extern struct vgic_global kvm_vgic_global_state;
+
+#define VGIC_V2_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
+#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
+
+enum vgic_irq_config {
+ VGIC_CONFIG_EDGE = 0,
+ VGIC_CONFIG_LEVEL
+};
+
+struct vgic_irq {
+ spinlock_t irq_lock; /* Protects the content of the struct */
+ struct list_head ap_list;
+
+ struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU this IRQ is private to.
+ * SPIs and LPIs: The VCPU whose ap_list
+ * this is queued on.
+ */
+
+ struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
+ * be sent to, as a result of the
+ * targets reg (v2) or the
+ * affinity reg (v3).
+ */
+
+ u32 intid; /* Guest visible INTID */
+ bool pending;
+ bool line_level; /* Level only */
+ bool soft_pending; /* Level only */
+ bool active; /* not used for LPIs */
+ bool enabled;
+ bool hw; /* Tied to HW IRQ */
+ u32 hwintid; /* HW INTID number */
+ union {
+ u8 targets; /* GICv2 target VCPUs mask */
+ u32 mpidr; /* GICv3 target VCPU */
+ };
+ u8 source; /* GICv2 SGIs only */
+ u8 priority;
+ enum vgic_irq_config config; /* Level or edge */
+};
+
+struct vgic_register_region;
+
+struct vgic_io_device {
+ gpa_t base_addr;
+ struct kvm_vcpu *redist_vcpu;
+ const struct vgic_register_region *regions;
+ int nr_regions;
+ struct kvm_io_device dev;
+};
+
+struct vgic_dist {
+ bool in_kernel;
+ bool ready;
+ bool initialized;
+
+ /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+ u32 vgic_model;
+
+ int nr_spis;
+
+ /* TODO: Consider moving to global state */
+ /* Virtual control interface mapping */
+ void __iomem *vctrl_base;
+
+ /* base addresses in guest physical address space: */
+ gpa_t vgic_dist_base; /* distributor */
+ union {
+ /* either a GICv2 CPU interface */
+ gpa_t vgic_cpu_base;
+ /* or a number of GICv3 redistributor regions */
+ gpa_t vgic_redist_base;
+ };
+
+ /* distributor enabled */
+ bool enabled;
+
+ struct vgic_irq *spis;
+
+ struct vgic_io_device dist_iodev;
+ struct vgic_io_device *redist_iodevs;
+};
+
+struct vgic_v2_cpu_if {
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u64 vgic_eisr; /* Saved only */
+ u64 vgic_elrsr; /* Saved only */
+ u32 vgic_apr;
+ u32 vgic_lr[VGIC_V2_MAX_LRS];
+};
+
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_sre; /* Restored only, change ignored */
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr; /* Saved only */
+ u32 vgic_elrsr; /* Saved only */
+ u32 vgic_ap0r[4];
+ u32 vgic_ap1r[4];
+ u64 vgic_lr[VGIC_V3_MAX_LRS];
+#endif
+};
+
+struct vgic_cpu {
+ /* CPU vif control registers for world switch */
+ union {
+ struct vgic_v2_cpu_if vgic_v2;
+ struct vgic_v3_cpu_if vgic_v3;
+ };
+
+ unsigned int used_lrs;
+ struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+
+ spinlock_t ap_list_lock; /* Protects the ap_list */
+
+ /*
+ * List of IRQs that this VCPU should consider because they are either
+ * Active or Pending (hence the name; AP list), or because they recently
+ * were one of the two and need to be migrated off this list to another
+ * VCPU.
+ */
+ struct list_head ap_list_head;
+
+ u64 live_lrs;
+};
+
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+void kvm_vgic_early_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
+int kvm_vgic_map_resources(struct kvm *kvm);
+int kvm_vgic_hyp_init(void);
+
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
+#define vgic_initialized(k) ((k)->arch.vgic.initialized)
+#define vgic_ready(k) ((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
+ ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
+
+bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+#else
+static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+}
+#endif
+
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+ return kvm_vgic_global_state.max_gic_vcpus;
+}
+
+#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */
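The helper above simply forwards the host-derived limit. As an illustration only (the check placement is an assumption, not copied from the arch code), a caller could use it to reject over-sized guests before any VCPU is created:

/* Hedged sketch: clamp a requested VCPU count to what the host GIC allows. */
static int example_check_vcpu_limit(int requested_vcpus)
{
	int max = kvm_vgic_get_max_vcpus();	/* 8 on GICv2, up to 255 on GICv3 */

	if (requested_vcpus > max)
		return -EINVAL;		/* guest asks for more VCPUs than the vGIC supports */
	return 0;
}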
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 06ed7e54033e..288fac5294f5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -190,14 +190,6 @@ static inline int acpi_debugger_notify_command_complete(void)
}
#endif
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-void acpi_initrd_override(void *data, size_t size);
-#else
-static inline void acpi_initrd_override(void *data, size_t size)
-{
-}
-#endif
-
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
@@ -216,6 +208,7 @@ void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+void early_acpi_table_init(void *data, size_t size);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_parse_entries(char *id, unsigned long table_size,
@@ -278,6 +271,7 @@ void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
extern unsigned int acpi_sci_irq;
+extern bool acpi_no_s5;
#define INVALID_ACPI_IRQ ((unsigned)-1)
static inline bool acpi_sci_irq_valid(void)
{
@@ -311,7 +305,6 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -359,7 +352,6 @@ extern bool wmi_has_guid(const char *guid);
extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
extern int acpi_blacklisted(void);
-extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
@@ -596,6 +588,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return NULL;
}
+static inline void early_acpi_table_init(void *data, size_t size) { }
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 10fe2a211c2e..27e9ec8778eb 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -86,7 +86,7 @@ struct pl08x_channel_data {
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
*/
struct pl08x_platform_data {
- const struct pl08x_channel_data *slave_channels;
+ struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
int (*get_xfer_signal)(const struct pl08x_channel_data *);
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index b2d32e01dfe4..714186de8c36 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -35,7 +35,7 @@
*/
static inline bool apple_gmux_present(void)
{
- return acpi_dev_present(GMUX_ACPI_HID);
+ return acpi_dev_found(GMUX_ACPI_HID);
}
#else /* !CONFIG_APPLE_GMUX */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c1a2f345cbe6..99346be5a7ca 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -243,6 +243,7 @@ enum {
ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
+ ATA_CMD_NCQ_NON_DATA = 0x63,
ATA_CMD_FPDMA_SEND = 0x64,
ATA_CMD_FPDMA_RECV = 0x65,
ATA_CMD_PIO_READ = 0x20,
@@ -301,19 +302,43 @@ enum {
ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
ATA_CMD_REQ_SENSE_DATA = 0x0B,
ATA_CMD_SANITIZE_DEVICE = 0xB4,
+ ATA_CMD_ZAC_MGMT_IN = 0x4A,
+ ATA_CMD_ZAC_MGMT_OUT = 0x9F,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
+ /* Subcmds for ATA_CMD_FPDMA_RECV */
+ ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT = 0x01,
+ ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN = 0x02,
+
/* Subcmds for ATA_CMD_FPDMA_SEND */
ATA_SUBCMD_FPDMA_SEND_DSM = 0x00,
ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,
+ /* Subcmds for ATA_CMD_NCQ_NON_DATA */
+ ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE = 0x00,
+ ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES = 0x05,
+ ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT = 0x06,
+ ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT = 0x07,
+
+ /* Subcmds for ATA_CMD_ZAC_MGMT_IN */
+ ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES = 0x00,
+
+ /* Subcmds for ATA_CMD_ZAC_MGMT_OUT */
+ ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE = 0x01,
+ ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE = 0x02,
+ ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE = 0x03,
+ ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER = 0x04,
+
/* READ_LOG_EXT pages */
+ ATA_LOG_DIRECTORY = 0x0,
ATA_LOG_SATA_NCQ = 0x10,
+ ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_SATA_ID_DEV_DATA = 0x30,
ATA_LOG_SATA_SETTINGS = 0x08,
+ ATA_LOG_ZONED_INFORMATION = 0x09,
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
@@ -328,8 +353,25 @@ enum {
ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04,
ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0),
ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08,
+ ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED = (1 << 0),
ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C,
- ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10,
+ ATA_LOG_NCQ_SEND_RECV_WR_LOG_SUPPORTED = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET = 0x10,
+ ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OUT_SUPPORTED = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED = (1 << 1),
+ ATA_LOG_NCQ_SEND_RECV_SIZE = 0x14,
+
+ /* NCQ Non-Data log */
+ ATA_LOG_NCQ_NON_DATA_SUBCMDS_OFFSET = 0x00,
+ ATA_LOG_NCQ_NON_DATA_ABORT_OFFSET = 0x00,
+ ATA_LOG_NCQ_NON_DATA_ABORT_NCQ = (1 << 0),
+ ATA_LOG_NCQ_NON_DATA_ABORT_ALL = (1 << 1),
+ ATA_LOG_NCQ_NON_DATA_ABORT_STREAMING = (1 << 2),
+ ATA_LOG_NCQ_NON_DATA_ABORT_NON_STREAMING = (1 << 3),
+ ATA_LOG_NCQ_NON_DATA_ABORT_SELECTED = (1 << 4),
+ ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET = 0x1C,
+ ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT = (1 << 0),
+ ATA_LOG_NCQ_NON_DATA_SIZE = 0x40,
/* READ/WRITE LONG (obsolete) */
ATA_CMD_READ_LONG = 0x22,
@@ -371,7 +413,8 @@ enum {
SETFEATURES_AAM_ON = 0x42,
SETFEATURES_AAM_OFF = 0xC2,
- SETFEATURES_SPINUP = 0x07, /* Spin-up drive */
+ SETFEATURES_SPINUP = 0x07, /* Spin-up drive */
+ SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
@@ -385,6 +428,8 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
+ SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -528,6 +573,8 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+#define ata_id_has_ncq_autosense(id) \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -716,6 +763,20 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
return false;
}
+static inline bool ata_id_has_sense_reporting(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+}
+
+static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+}
+
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
@@ -820,6 +881,11 @@ static inline bool ata_id_has_ncq_send_and_recv(const u16 *id)
return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
}
+static inline bool ata_id_has_ncq_non_data(const u16 *id)
+{
+ return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
+}
+
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
@@ -871,6 +937,11 @@ static inline bool ata_id_is_ssd(const u16 *id)
return id[ATA_ID_ROT_SPEED] == 0x01;
}
+static inline u8 ata_id_zoned_cap(const u16 *id)
+{
+ return (id[ATA_ID_ADDITIONAL_SUPP] & 0x3);
+}
+
static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio)
{
/* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
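ata_id_zoned_cap() extracts the two ZAC capability bits from the ADDITIONAL SUPPORTED identify word. A hedged sketch of how a probe path might use it (the consumer shown is hypothetical; 0x01 is the host-aware encoding per ZAC):

/* Hedged sketch: classify a drive from its IDENTIFY data. */
static bool example_dev_is_host_aware(const u16 *id)
{
	return ata_id_zoned_cap(id) == 0x01;	/* 0x00 = not reported / not zoned */
}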
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index 33eb274cd0e6..e66153d60bd5 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,6 +31,10 @@ struct ath9k_platform_data {
u32 gpio_mask;
u32 gpio_val;
+ u32 bt_active_pin;
+ u32 bt_priority_pin;
+ u32 wlan_active_pin;
+
bool endian_check;
bool is_clk_25mhz;
bool tx_gain_buffalo;
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 506c3531832e..e451534fe54d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -560,11 +560,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
/**
* atomic_fetch_or - perform *p |= mask and return old value of *p
- * @p: pointer to atomic_t
* @mask: mask to OR on the atomic_t
+ * @p: pointer to atomic_t
*/
#ifndef atomic_fetch_or
-static inline int atomic_fetch_or(atomic_t *p, int mask)
+static inline int atomic_fetch_or(int mask, atomic_t *p)
{
int old, val = atomic_read(p);
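The swapped prototype puts the mask first, matching the other atomic bitwise helpers. A small illustrative use with made-up flag names:

/* Hedged sketch: set a PENDING bit and learn whether it was already set. */
#define EXAMPLE_FLAG_PENDING	(1 << 0)

static bool example_mark_pending(atomic_t *flags)
{
	int old = atomic_fetch_or(EXAMPLE_FLAG_PENDING, flags);	/* mask first, pointer second */

	return old & EXAMPLE_FLAG_PENDING;	/* true if someone beat us to it */
}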
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e38e3fc13ea8..961a417d641e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#include <linux/tty.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -347,6 +348,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
return tsk->sessionid;
}
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ struct tty_struct *tty = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ if (tsk->signal)
+ tty = tty_kref_get(tsk->signal->tty);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ return tty;
+}
+
+static inline void audit_put_tty(struct tty_struct *tty)
+{
+ tty_kref_put(tty);
+}
+
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
@@ -504,6 +522,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
return -1;
}
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ return NULL;
+}
+static inline void audit_put_tty(struct tty_struct *tty)
+{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
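audit_get_tty() takes a tty reference under siglock and audit_put_tty() drops it, so the task's controlling tty can be inspected safely outside the lock. A hedged sketch of the intended pairing (the logging call is a placeholder):

/* Hedged sketch: grab the tty reference, use its name, then drop it. */
static void example_log_tty(struct task_struct *tsk)
{
	struct tty_struct *tty = audit_get_tty(tsk);

	pr_info("task %d tty=%s\n", task_pid_nr(tsk), tty ? tty_name(tty) : "(none)");
	audit_put_tty(tty);	/* tty_kref_put() tolerates NULL */
}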
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1e7a69adbe6f..5f2fd61ef4fb 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -141,9 +141,10 @@ extern void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
-extern bool backlight_device_registered(enum backlight_type type);
extern int backlight_register_notifier(struct notifier_block *nb);
extern int backlight_unregister_notifier(struct notifier_block *nb);
+extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
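The two new exports replace the registered-type query with a direct lookup plus a brightness setter. In outline, a consumer could do something like the following (error handling trimmed, the half-brightness value is arbitrary):

/* Hedged sketch: dim the firmware backlight to half of its maximum. */
static int example_dim_firmware_backlight(void)
{
	struct backlight_device *bd;

	bd = backlight_device_get_by_type(BACKLIGHT_FIRMWARE);
	if (!bd)
		return -ENODEV;

	return backlight_device_set_brightness(bd, bd->props.max_brightness / 2);
}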
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
new file mode 100644
index 000000000000..c06b47c84e1a
--- /dev/null
+++ b/include/linux/bcm47xx_sprom.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_SPROM_H
+#define __BCM47XX_SPROM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_SPROM
+int bcm47xx_sprom_register_fallbacks(void);
+#else
+static inline int bcm47xx_sprom_register_fallbacks(void)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __BCM47XX_SPROM_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 0367c63f5960..e6b41f42602b 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -4,6 +4,7 @@
#include <linux/pci.h>
#include <linux/mod_devicetable.h>
+#include <linux/bcma/bcma_driver_arm_c9.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/bcma/bcma_driver_pci.h>
#include <linux/bcma/bcma_driver_pcie2.h>
diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h
new file mode 100644
index 000000000000..93bd73d670d5
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_arm_c9.h
@@ -0,0 +1,15 @@
+#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_
+#define LINUX_BCMA_DRIVER_ARM_C9_H_
+
+/* DMU (Device Management Unit) */
+#define BCMA_DMU_CRU_USB2_CONTROL 0x0164
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK 0x00000FFC
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT 2
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK 0x00007000
+#define BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT 12
+#define BCMA_DMU_CRU_CLKSET_KEY 0x0180
+#define BCMA_DMU_CRU_STRAPS_CTRL 0x02A0
+#define BCMA_DMU_CRU_STRAPS_CTRL_USB3 0x00000010
+#define BCMA_DMU_CRU_STRAPS_CTRL_4BYTE 0x00008000
+
+#endif /* LINUX_BCMA_DRIVER_ARM_C9_H_ */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 846513c73606..a5ac2cad5cb7 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -587,7 +587,6 @@ struct mtd_info;
struct bcma_sflash {
bool present;
- u32 window;
u32 blocksize;
u16 numblocks;
u32 size;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 6b7481f62218..9faebf7f9a33 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -703,6 +703,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
}
/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+ bio_set_flag(bio, BIO_CHAIN);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_remaining);
+}
+
+/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
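bio_inc_remaining() lets a stacking driver hold the parent bio open while extra work is outstanding; each extra unit is later released by a matching bio_endio() on the same bio, which drops __bi_remaining. A hedged sketch of the pattern (the "side work" is abstract here):

/* Hedged sketch: keep @bio from completing until our side work finishes. */
static void example_start_side_work(struct bio *bio)
{
	bio_inc_remaining(bio);		/* parent now needs one more bio_endio() */
	/* ... kick off asynchronous work that references @bio ... */
}

static void example_side_work_done(struct bio *bio)
{
	bio_endio(bio);			/* drops the extra __bi_remaining reference */
}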
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index defeaac0745f..299e76b59fe9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -227,6 +227,22 @@ static inline unsigned long __ffs64(u64 word)
})
#endif
+#ifndef bit_clear_unless
+#define bit_clear_unless(ptr, _clear, _test) \
+({ \
+ const typeof(*ptr) clear = (_clear), test = (_test); \
+ typeof(*ptr) old, new; \
+ \
+ do { \
+ old = ACCESS_ONCE(*ptr); \
+ new = old & ~clear; \
+ } while (!(old & test) && \
+ cmpxchg(ptr, old, new) != old); \
+ \
+ !(old & test); \
+})
+#endif
+
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
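bit_clear_unless() atomically clears the @_clear bits in *ptr but backs off if any bit in @_test is set; it returns true when the test bits were clear, i.e. the clear actually went through. An illustrative call with made-up flag names:

#define EXAMPLE_F_DIRTY		(1UL << 0)
#define EXAMPLE_F_WRITEBACK	(1UL << 1)

/* Hedged sketch: drop the DIRTY bit, but only if writeback is not in flight. */
static bool example_clear_dirty(unsigned long *flags)
{
	/* Returns true when DIRTY was cleared, false if WRITEBACK blocked it. */
	return bit_clear_unless(flags, EXAMPLE_F_DIRTY, EXAMPLE_F_WRITEBACK);
}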
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9ac9799b702b..2498fdf3a503 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -238,8 +238,8 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
- void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 86a38ea1823f..77e5d81f07aa 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -208,7 +208,7 @@ enum rq_flag_bits {
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
- REQ_SECURE | REQ_INTEGRITY)
+ REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 669e419d6234..3d9cf326574f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -433,8 +433,6 @@ struct request_queue {
/*
* for flush operations
*/
- unsigned int flush_flags;
- unsigned int flush_not_queueable:1;
struct blk_flush_queue *fq;
struct list_head requeue_list;
@@ -491,6 +489,9 @@ struct request_queue {
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 23 /* Write back caching */
+#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -767,6 +768,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
}
#endif
+#ifdef CONFIG_PRINTK
+#define vfs_msg(sb, level, fmt, ...) \
+ __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#else
+#define vfs_msg(sb, level, fmt, ...) \
+do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __vfs_msg(sb, "", " "); \
+} while (0)
+#endif
+
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
@@ -779,7 +791,7 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
- unsigned int len);
+ int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -1007,8 +1019,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1128,6 +1140,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -1363,7 +1377,7 @@ static inline unsigned int block_size(struct block_device *bdev)
static inline bool queue_flush_queueable(struct request_queue *q)
{
- return !q->flush_not_queueable;
+ return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}
typedef struct {struct page *v;} Sector;
@@ -1657,7 +1671,7 @@ struct block_device_operations {
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void __pmem **,
- pfn_t *);
+ pfn_t *, long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1677,6 +1691,8 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
+extern int bdev_dax_supported(struct super_block *, int);
+extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */
struct block_device;
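blk_queue_write_cache() replaces the old flush_flags plumbing: a driver states whether the device has a volatile write cache and whether it honours FUA, and the core sets QUEUE_FLAG_WC/QUEUE_FLAG_FUA accordingly. A hedged probe-time sketch:

/* Hedged sketch: advertise a volatile write cache and FUA support at probe time. */
static void example_setup_cache_flags(struct request_queue *q, bool has_wc, bool has_fua)
{
	blk_queue_write_cache(q, has_wc, has_fua);	/* replaces blk_queue_flush(q, REQ_FLUSH | REQ_FUA) */
}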
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index afc1343df3c7..0f3172b8b225 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
} while (0)
#define BLK_TN_MAX_MSG 128
+static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+{
+ struct blk_trace *bt = q->blk_trace;
+ if (likely(!bt))
+ return false;
+ return bt->act_mask & BLK_TC_NOTIFY;
+}
+
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
@@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group;
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_trace_remove_sysfs(dev) do { } while (0)
+# define blk_trace_note_message_enabled(q) (false)
static inline int blk_trace_init_sysfs(struct device *dev)
{
return 0;
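blk_trace_note_message_enabled() lets callers skip the cost of formatting a trace note when nobody asked for BLK_TC_NOTIFY events; the !CONFIG_BLK_DEV_IO_TRACE stub returns false as well. A hedged usage sketch:

/* Hedged sketch: only build the (potentially expensive) message when it can be seen. */
static void example_trace_resync(struct request_queue *q, sector_t sector)
{
	if (blk_trace_note_message_enabled(q))
		blk_add_trace_msg(q, "example resync at %llu",
				  (unsigned long long)sector);
}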
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 35b22f94d2d2..f9be32691718 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size,
unsigned long goal);
extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *__alloc_bootmem_node_high(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
- unsigned long limit);
+ unsigned long limit) __malloc;
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *__alloc_bootmem_low_nopanic(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
#ifdef CONFIG_NO_BOOTMEM
/* We are using top down, so it is safe to use 0 here */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 21ee41b92e8a..8ee27b8afe81 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -66,6 +66,11 @@ enum bpf_arg_type {
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not
+ * need to be initialized, helper function must fill
+ * all bytes or clear them in error case.
+ */
+
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
@@ -131,6 +136,7 @@ struct bpf_prog_type_list {
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
+ u32 max_ctx_offset;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
@@ -160,9 +166,12 @@ struct bpf_array {
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_event_output_proto(void);
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
@@ -171,12 +180,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
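ARG_PTR_TO_RAW_STACK tells the verifier that the helper itself will initialize the stack buffer, so callers no longer have to pre-zero it. A hedged illustration of how a helper prototype might advertise this (the helper and proto below are made up, not taken from this patch):

/* Hedged sketch: a helper that fully fills (or clears on error) its stack buffer. */
static u64 example_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* ...fill the buffer at r1 with r2 bytes, or memset() it on error... */
	return 0;
}

static const struct bpf_func_proto example_read_proto = {
	.func		= example_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,		/* buffer need not be pre-initialized */
	.arg2_type	= ARG_CONST_STACK_SIZE,		/* verifier-known buffer length */
	.arg3_type	= ARG_ANYTHING,
};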
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 735f9f8c4e43..5261751f6bd4 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -40,8 +40,11 @@ struct can_priv {
struct can_clock clock;
enum can_state state;
- u32 ctrlmode;
- u32 ctrlmode_supported;
+
+ /* CAN controller features - see include/uapi/linux/can/netlink.h */
+ u32 ctrlmode; /* current options setting */
+ u32 ctrlmode_supported; /* options that can be modified by netlink */
+ u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct timer_list restart_timer;
@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
return skb->len == CANFD_MTU;
}
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ priv->ctrlmode = static_mode;
+ priv->ctrlmode_static = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+}
+
/* get data length from can_dlc with sanitized can_dlc */
u8 can_dlc2len(u8 can_dlc);
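can_set_static_ctrlmode() is meant to be called right after alloc_candev() for controllers whose options are fixed in hardware. A hedged probe-time sketch (the priv size is a placeholder):

/* Hedged sketch: a CAN FD-only controller fixes the FD option at allocation time. */
static struct net_device *example_candev_alloc(void)
{
	struct net_device *dev = alloc_candev(sizeof(struct can_priv), 1);

	if (!dev)
		return NULL;

	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD);	/* also bumps dev->mtu to CANFD_MTU */
	return dev;
}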
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 915af3095b39..7c2bb27c067c 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,9 +1,10 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -381,6 +382,35 @@ struct ccp_passthru_engine {
u32 final;
};
+/**
+ * struct ccp_passthru_nomap_engine - CCP pass-through operation
+ * without performing DMA mapping
+ * @bit_mod: bitwise operation to perform
+ * @byte_swap: byteswap operation to perform
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicate final pass-through operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - bit_mod, byte_swap, src, dst, src_len
+ * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
+ */
+struct ccp_passthru_nomap_engine {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+
+ u32 final;
+};
+
/***** ECC engine *****/
#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
#define CCP_ECC_MAX_OPERANDS 6
@@ -522,7 +552,8 @@ enum ccp_engine {
};
/* Flag values for flags member of ccp_cmd */
-#define CCP_CMD_MAY_BACKLOG 0x00000001
+#define CCP_CMD_MAY_BACKLOG 0x00000001
+#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002
/**
 * struct ccp_cmd - CCP operation request
@@ -562,6 +593,7 @@ struct ccp_cmd {
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
+ struct ccp_passthru_nomap_engine passthru_nomap;
struct ccp_ecc_engine ecc;
} u;
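With CCP_CMD_PASSTHRU_NO_DMA_MAP the caller hands the engine already-mapped DMA addresses. A hedged sketch of filling such a request before ccp_enqueue_cmd() (field values are placeholders):

/* Hedged sketch: a pass-through copy using caller-provided DMA addresses. */
static void example_fill_nomap_cmd(struct ccp_cmd *cmd, dma_addr_t src,
				   dma_addr_t dst, u64 len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_PASSTHRU;
	cmd->flags = CCP_CMD_MAY_BACKLOG | CCP_CMD_PASSTHRU_NO_DMA_MAP;
	cmd->u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd->u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd->u.passthru_nomap.src_dma = src;
	cmd->u.passthru_nomap.dst_dma = dst;
	cmd->u.passthru_nomap.src_len = len;
	/* then hand it off: ccp_enqueue_cmd(cmd); */
}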
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
*/
struct ceph_auth_client;
-struct ceph_authorizer;
struct ceph_msg;
+struct ceph_authorizer {
+ void (*destroy)(struct ceph_authorizer *);
+};
+
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
- void (*destroy_authorizer)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index b827e066e55a..146507df8650 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -51,11 +51,11 @@ static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
return ceph_frag_make(newbits,
ceph_frag_value(f) | (i << (24 - newbits)));
}
-static inline int ceph_frag_is_leftmost(__u32 f)
+static inline bool ceph_frag_is_leftmost(__u32 f)
{
return ceph_frag_value(f) == 0;
}
-static inline int ceph_frag_is_rightmost(__u32 f)
+static inline bool ceph_frag_is_rightmost(__u32 f)
{
return ceph_frag_value(f) == ceph_frag_mask(f);
}
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 37f28bf55ce4..dfce616002ad 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -153,8 +153,9 @@ struct ceph_dir_layout {
/* watch-notify operations */
enum {
- WATCH_NOTIFY = 1, /* notifying watcher */
- WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */
+ CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */
+ CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */
+ CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */
};
@@ -207,6 +208,8 @@ struct ceph_mon_subscribe_ack {
struct ceph_fsid fsid;
} __attribute__ ((packed));
+#define CEPH_FS_CLUSTER_ID_NONE -1
+
/*
* mdsmap flags
*/
@@ -344,6 +347,18 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_XATTR_REPLACE (1 << 1)
#define CEPH_XATTR_REMOVE (1 << 31)
+/*
+ * readdir request flags
+ */
+#define CEPH_READDIR_REPLY_BITFLAGS (1<<0)
+
+/*
+ * readdir reply flags.
+ */
+#define CEPH_READDIR_FRAG_END (1<<0)
+#define CEPH_READDIR_FRAG_COMPLETE (1<<8)
+#define CEPH_READDIR_HASH_ORDER (1<<9)
+
union ceph_mds_request_args {
struct {
__le32 mask; /* CEPH_CAP_* */
@@ -361,6 +376,7 @@ union ceph_mds_request_args {
__le32 frag; /* which dir fragment */
__le32 max_entries; /* how many dentries to grab */
__le32 max_bytes;
+ __le16 flags;
} __attribute__ ((packed)) readdir;
struct {
__le32 mode;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index a6ef9cc267ec..19e9932f3e77 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -47,7 +47,7 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n)
/*
* bounds check input.
*/
-static inline int ceph_has_room(void **p, void *end, size_t n)
+static inline bool ceph_has_room(void **p, void *end, size_t n)
{
return end >= *p && n <= end - *p;
}
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index db92a8d4926e..690985daad1c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -180,6 +180,63 @@ static inline int calc_pages_for(u64 off, u64 len)
(off >> PAGE_SHIFT);
}
+/*
+ * These are not meant to be generic - an integer key is assumed.
+ */
+#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \
+static void insert_##name(struct rb_root *root, type *t) \
+{ \
+ struct rb_node **n = &root->rb_node; \
+ struct rb_node *parent = NULL; \
+ \
+ BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \
+ \
+ while (*n) { \
+ type *cur = rb_entry(*n, type, nodefld); \
+ \
+ parent = *n; \
+ if (t->keyfld < cur->keyfld) \
+ n = &(*n)->rb_left; \
+ else if (t->keyfld > cur->keyfld) \
+ n = &(*n)->rb_right; \
+ else \
+ BUG(); \
+ } \
+ \
+ rb_link_node(&t->nodefld, parent, n); \
+ rb_insert_color(&t->nodefld, root); \
+} \
+static void erase_##name(struct rb_root *root, type *t) \
+{ \
+ BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \
+ rb_erase(&t->nodefld, root); \
+ RB_CLEAR_NODE(&t->nodefld); \
+}
+
+#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \
+static type *lookup_##name(struct rb_root *root, \
+ typeof(((type *)0)->keyfld) key) \
+{ \
+ struct rb_node *n = root->rb_node; \
+ \
+ while (n) { \
+ type *cur = rb_entry(n, type, nodefld); \
+ \
+ if (key < cur->keyfld) \
+ n = n->rb_left; \
+ else if (key > cur->keyfld) \
+ n = n->rb_right; \
+ else \
+ return cur; \
+ } \
+ \
+ return NULL; \
+}
+
+#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \
+DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \
+DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
+
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
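DEFINE_RB_FUNCS() generates insert_/erase_/lookup_ helpers for any struct with an integer key and an embedded rb_node. A hedged illustration with a made-up type (insert_/erase_ are generated alongside the lookup shown):

/* Hedged sketch: generated helpers for a hypothetical tid-keyed tree. */
struct example_request {
	u64 tid;
	struct rb_node node;		/* must be RB_CLEAR_NODE()'d before insert */
};

DEFINE_RB_FUNCS(example_request, struct example_request, tid, node)

static struct example_request *example_find(struct rb_root *root, u64 tid)
{
	return lookup_example_request(root, tid);
}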
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index e230e7ed60d3..e2a92df08b47 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -39,20 +39,31 @@ struct ceph_mon_request {
ceph_monc_request_func_t do_request;
};
+typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *);
+
/*
* ceph_mon_generic_request is being used for the statfs and
* mon_get_version requests which are being done a bit differently
* because we need to get data back to the caller
*/
struct ceph_mon_generic_request {
+ struct ceph_mon_client *monc;
struct kref kref;
u64 tid;
struct rb_node node;
int result;
- void *buf;
+
struct completion completion;
+ ceph_monc_callback_t complete_cb;
+ u64 private_data; /* r_tid/linger_id */
+
struct ceph_msg *request; /* original request */
struct ceph_msg *reply; /* and reply */
+
+ union {
+ struct ceph_statfs *st;
+ u64 newest;
+ } u;
};
struct ceph_mon_client {
@@ -77,7 +88,6 @@ struct ceph_mon_client {
/* pending generic requests */
struct rb_root generic_request_tree;
- int num_generic_requests;
u64 last_tid;
/* subs, indexed with CEPH_SUB_* */
@@ -86,6 +96,7 @@ struct ceph_mon_client {
bool want;
u32 have; /* epoch */
} subs[3];
+ int fs_cluster_id; /* "mdsmap.<id>" sub */
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_file;
@@ -116,16 +127,18 @@ extern const char *ceph_sub_str[];
bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
bool continuous);
void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch);
+void ceph_monc_renew_subs(struct ceph_mon_client *monc);
-extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout);
extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
struct ceph_statfs *buf);
-extern int ceph_monc_do_get_version(struct ceph_mon_client *monc,
- const char *what, u64 *newest);
+int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+ u64 *newest);
+int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
+ ceph_monc_callback_t cb, u64 private_data);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..1b3b6e155392 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,15 +16,15 @@ struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
-struct ceph_authorizer;
/*
* completion callback for async writepages
*/
-typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
- struct ceph_msg *);
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
+#define CEPH_HOMELESS_OSD -1
+
/* a given osd we're communicating with */
struct ceph_osd {
atomic_t o_ref;
@@ -33,16 +33,15 @@ struct ceph_osd {
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
- struct list_head o_requests;
- struct list_head o_linger_requests;
+ struct rb_root o_requests;
+ struct rb_root o_linger_requests;
struct list_head o_osd_lru;
struct ceph_auth_handshake o_auth;
unsigned long lru_ttl;
- int o_marked_for_keepalive;
struct list_head o_keepalive_item;
+ struct mutex lock;
};
-
#define CEPH_OSD_SLAB_OPS 2
#define CEPH_OSD_MAX_OPS 16
@@ -105,76 +104,95 @@ struct ceph_osd_req_op {
struct ceph_osd_data response_data;
__u8 class_len;
__u8 method_len;
- __u8 argc;
+ u32 indata_len;
} cls;
struct {
u64 cookie;
- u64 ver;
- u32 prot_ver;
- u32 timeout;
- __u8 flag;
+ __u8 op; /* CEPH_OSD_WATCH_OP_ */
+ u32 gen;
} watch;
struct {
+ struct ceph_osd_data request_data;
+ } notify_ack;
+ struct {
+ u64 cookie;
+ struct ceph_osd_data request_data;
+ struct ceph_osd_data response_data;
+ } notify;
+ struct {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
};
};
+struct ceph_osd_request_target {
+ struct ceph_object_id base_oid;
+ struct ceph_object_locator base_oloc;
+ struct ceph_object_id target_oid;
+ struct ceph_object_locator target_oloc;
+
+ struct ceph_pg pgid;
+ u32 pg_num;
+ u32 pg_num_mask;
+ struct ceph_osds acting;
+ struct ceph_osds up;
+ int size;
+ int min_size;
+ bool sort_bitwise;
+
+ unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool paused;
+
+ int osd;
+};
+
/* an in-flight request */
struct ceph_osd_request {
u64 r_tid; /* unique for this client */
struct rb_node r_node;
- struct list_head r_req_lru_item;
- struct list_head r_osd_item;
- struct list_head r_linger_item;
- struct list_head r_linger_osd_item;
+ struct rb_node r_mc_node; /* map check */
struct ceph_osd *r_osd;
- struct ceph_pg r_pgid;
- int r_pg_osds[CEPH_PG_MAX_SIZE];
- int r_num_pg_osds;
+
+ struct ceph_osd_request_target r_t;
+#define r_base_oid r_t.base_oid
+#define r_base_oloc r_t.base_oloc
+#define r_flags r_t.flags
struct ceph_msg *r_request, *r_reply;
- int r_flags; /* any additional flags for the osd */
u32 r_sent; /* >0 if r_request is sending/sent */
/* request osd ops array */
unsigned int r_num_ops;
- /* these are updated on each send */
- __le32 *r_request_osdmap_epoch;
- __le32 *r_request_flags;
- __le64 *r_request_pool;
- void *r_request_pgid;
- __le32 *r_request_attempts;
- bool r_paused;
- struct ceph_eversion *r_request_reassert_version;
-
int r_result;
- int r_got_reply;
- int r_linger;
+ bool r_got_reply;
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
- struct completion r_completion, r_safe_completion;
+ struct completion r_completion;
+ struct completion r_safe_completion; /* fsync waiter */
ceph_osdc_callback_t r_callback;
ceph_osdc_unsafe_callback_t r_unsafe_callback;
- struct ceph_eversion r_reassert_version;
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
void *r_priv; /* ditto */
- struct ceph_object_locator r_base_oloc;
- struct ceph_object_id r_base_oid;
- struct ceph_object_locator r_target_oloc;
- struct ceph_object_id r_target_oid;
-
- u64 r_snapid;
- unsigned long r_stamp; /* send OR check time */
+ /* set by submitter */
+ u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
+ struct ceph_snap_context *r_snapc; /* for writes */
+ struct timespec r_mtime; /* ditto */
+ u64 r_data_offset; /* ditto */
+ bool r_linger; /* don't resend on failure */
- struct ceph_snap_context *r_snapc; /* snap context for writes */
+ /* internal */
+ unsigned long r_stamp; /* jiffies, send or check time */
+ int r_attempts;
+ struct ceph_eversion r_replay_version; /* aka reassert_version */
+ u32 r_last_force_resend;
+ u32 r_map_dne_bound;
struct ceph_osd_req_op r_ops[];
};
@@ -183,44 +201,70 @@ struct ceph_request_redirect {
struct ceph_object_locator oloc;
};
-struct ceph_osd_event {
- u64 cookie;
- int one_shot;
+typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len);
+typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err);
+
+struct ceph_osd_linger_request {
struct ceph_osd_client *osdc;
- void (*cb)(u64, u64, u8, void *);
- void *data;
- struct rb_node node;
- struct list_head osd_node;
+ u64 linger_id;
+ bool committed;
+ bool is_watch; /* watch or notify */
+
+ struct ceph_osd *osd;
+ struct ceph_osd_request *reg_req;
+ struct ceph_osd_request *ping_req;
+ unsigned long ping_sent;
+ unsigned long watch_valid_thru;
+ struct list_head pending_lworks;
+
+ struct ceph_osd_request_target t;
+ u32 last_force_resend;
+ u32 map_dne_bound;
+
+ struct timespec mtime;
+
struct kref kref;
-};
+ struct mutex lock;
+ struct rb_node node; /* osd */
+ struct rb_node osdc_node; /* osdc */
+ struct rb_node mc_node; /* map check */
+ struct list_head scan_item;
+
+ struct completion reg_commit_wait;
+ struct completion notify_finish_wait;
+ int reg_commit_error;
+ int notify_finish_error;
+ int last_error;
+
+ u32 register_gen;
+ u64 notify_id;
+
+ rados_watchcb2_t wcb;
+ rados_watcherrcb_t errcb;
+ void *data;
-struct ceph_osd_event_work {
- struct work_struct work;
- struct ceph_osd_event *event;
- u64 ver;
- u64 notify_id;
- u8 opcode;
+ struct page ***preply_pages;
+ size_t *preply_len;
};
struct ceph_osd_client {
struct ceph_client *client;
struct ceph_osdmap *osdmap; /* current map */
- struct rw_semaphore map_sem;
- struct completion map_waiters;
- u64 last_requested_map;
+ struct rw_semaphore lock;
- struct mutex request_mutex;
struct rb_root osds; /* osds */
struct list_head osd_lru; /* idle osds */
- u64 timeout_tid; /* tid of timeout triggering rq */
- u64 last_tid; /* tid of last request */
- struct rb_root requests; /* pending requests */
- struct list_head req_lru; /* in-flight lru */
- struct list_head req_unsent; /* unsent/need-resend queue */
- struct list_head req_notarget; /* map to no osd */
- struct list_head req_linger; /* lingering requests */
- int num_requests;
+ spinlock_t osd_lru_lock;
+ struct ceph_osd homeless_osd;
+ atomic64_t last_tid; /* tid of last request */
+ u64 last_linger_id;
+ struct rb_root linger_requests; /* lingering requests */
+ struct rb_root map_checks;
+ struct rb_root linger_map_checks;
+ atomic_t num_requests;
+ atomic_t num_homeless;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
@@ -232,13 +276,14 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op;
struct ceph_msgpool msgpool_op_reply;
- spinlock_t event_lock;
- struct rb_root event_tree;
- u64 event_count;
-
struct workqueue_struct *notify_wq;
};
+static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+{
+ return osdc->osdmap->flags & flag;
+}
+
extern int ceph_osdc_setup(void);
extern void ceph_osdc_cleanup(void);
@@ -272,9 +317,6 @@ extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
extern struct ceph_osd_data *osd_req_op_extent_osd_data(
struct ceph_osd_request *osd_req,
unsigned int which);
-extern struct ceph_osd_data *osd_req_op_cls_response_data(
- struct ceph_osd_request *osd_req,
- unsigned int which);
extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
unsigned int which,
@@ -310,9 +352,6 @@ extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
-extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- u64 cookie, u64 version, int flag);
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
@@ -323,11 +362,7 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *
unsigned int num_ops,
bool use_mempool,
gfp_t gfp_flags);
-
-extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
- struct ceph_snap_context *snapc,
- u64 snap_id,
- struct timespec *mtime);
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
@@ -339,9 +374,6 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
-extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -354,6 +386,7 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
@@ -372,11 +405,33 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct timespec *mtime,
struct page **pages, int nr_pages);
-/* watch/notify events */
-extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
- void (*event_cb)(u64, u64, u8, void *),
- void *data, struct ceph_osd_event **pevent);
-extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
-extern void ceph_osdc_put_event(struct ceph_osd_event *event);
+/* watch/notify */
+struct ceph_osd_linger_request *
+ceph_osdc_watch(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ rados_watchcb2_t wcb,
+ rados_watcherrcb_t errcb,
+ void *data);
+int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq);
+
+int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ u64 notify_id,
+ u64 cookie,
+ void *payload,
+ size_t payload_len);
+int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ void *payload,
+ size_t payload_len,
+ u32 timeout,
+ struct page ***preply_pages,
+ size_t *preply_len);
+int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq);
#endif
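The reworked watch/notify interface hands back a ceph_osd_linger_request and delivers events through rados_watchcb2_t/rados_watcherrcb_t callbacks. A hedged sketch of registering a watch (callback bodies elided; real users such as rbd keep oid/oloc around so the callback can ack notifies via ceph_osdc_notify_ack()):

/* Hedged sketch: establish a watch on an object. */
static void example_watch_cb(void *arg, u64 notify_id, u64 cookie,
			     u64 notifier_id, void *data, size_t data_len)
{
	/* ...handle the notify payload, then ack it with ceph_osdc_notify_ack()... */
}

static void example_watch_err(void *arg, u64 cookie, int err)
{
	/* ...typically tear down and re-register the watch... */
}

static struct ceph_osd_linger_request *
example_register_watch(struct ceph_osd_client *osdc, struct ceph_object_id *oid,
		       struct ceph_object_locator *oloc, void *ctx)
{
	return ceph_osdc_watch(osdc, oid, oloc, example_watch_cb,
			       example_watch_err, ctx);
}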
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e55c08bc3a96..9ccf4dbe55f8 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -24,21 +24,29 @@ struct ceph_pg {
uint32_t seed;
};
-#define CEPH_POOL_FLAG_HASHPSPOOL 1
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
+
+#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
+ together */
+#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
struct ceph_pg_pool_info {
struct rb_node node;
s64 id;
- u8 type;
+ u8 type; /* CEPH_POOL_TYPE_* */
u8 size;
+ u8 min_size;
u8 crush_ruleset;
u8 object_hash;
+ u32 last_force_request_resend;
u32 pg_num, pgp_num;
int pg_num_mask, pgp_num_mask;
s64 read_tier;
s64 write_tier; /* wins for read+write ops */
- u64 flags;
+ u64 flags; /* CEPH_POOL_FLAG_* */
char *name;
+
+ bool was_full; /* for handle_one_map() */
};
static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
@@ -57,6 +65,22 @@ struct ceph_object_locator {
s64 pool;
};
+static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
+{
+ oloc->pool = -1;
+}
+
+static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
+{
+ return oloc->pool == -1;
+}
+
+static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
+ const struct ceph_object_locator *src)
+{
+ dest->pool = src->pool;
+}
+
/*
* Maximum supported by kernel client object name length
*
@@ -64,11 +88,47 @@ struct ceph_object_locator {
*/
#define CEPH_MAX_OID_NAME_LEN 100
+/*
+ * 51-char inline_name is long enough for all cephfs and all but one
+ * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
+ * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
+ * other rbd requests fit into inline_name.
+ *
+ * Makes ceph_object_id 64 bytes on 64-bit.
+ */
+#define CEPH_OID_INLINE_LEN 52
+
+/*
+ * Both inline and external buffers have space for a NUL-terminator,
+ * which is carried around. It's not required though - RADOS object
+ * names don't have to be NUL-terminated and may contain NULs.
+ */
struct ceph_object_id {
- char name[CEPH_MAX_OID_NAME_LEN];
+ char *name;
+ char inline_name[CEPH_OID_INLINE_LEN];
int name_len;
};
+static inline void ceph_oid_init(struct ceph_object_id *oid)
+{
+ oid->name = oid->inline_name;
+ oid->name_len = 0;
+}
+
+static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
+{
+ return oid->name == oid->inline_name && !oid->name_len;
+}
+
+void ceph_oid_copy(struct ceph_object_id *dest,
+ const struct ceph_object_id *src);
+__printf(2, 3)
+void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
+__printf(3, 4)
+int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
+ const char *fmt, ...);
+void ceph_oid_destroy(struct ceph_object_id *oid);
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -87,7 +147,6 @@ struct ceph_pg_mapping {
struct ceph_osdmap {
struct ceph_fsid fsid;
u32 epoch;
- u32 mkfs_epoch;
struct ceph_timespec created, modified;
u32 flags; /* CEPH_OSDMAP_* */
@@ -113,52 +172,23 @@ struct ceph_osdmap {
int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
};
-static inline void ceph_oid_set_name(struct ceph_object_id *oid,
- const char *name)
-{
- int len;
-
- len = strlen(name);
- if (len > sizeof(oid->name)) {
- WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n",
- name, len, sizeof(oid->name));
- len = sizeof(oid->name);
- }
-
- memcpy(oid->name, name, len);
- oid->name_len = len;
-}
-
-static inline void ceph_oid_copy(struct ceph_object_id *dest,
- struct ceph_object_id *src)
-{
- BUG_ON(src->name_len > sizeof(dest->name));
- memcpy(dest->name, src->name, src->name_len);
- dest->name_len = src->name_len;
-}
-
-static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
{
return osd >= 0 && osd < map->max_osd &&
(map->osd_state[osd] & CEPH_OSD_EXISTS);
}
-static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
{
return ceph_osd_exists(map, osd) &&
(map->osd_state[osd] & CEPH_OSD_UP);
}
-static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
{
return !ceph_osd_is_up(map, osd);
}
-static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
-{
- return map && (map->flags & flag);
-}
-
extern char *ceph_osdmap_state_str(char *str, int len, int state);
extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
@@ -192,28 +222,59 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
return 0;
}
+struct ceph_osdmap *ceph_osdmap_alloc(void);
extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
- struct ceph_osdmap *map,
- struct ceph_messenger *msgr);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
+struct ceph_osds {
+ int osds[CEPH_PG_MAX_SIZE];
+ int size;
+ int primary; /* id, NOT index */
+};
+
+static inline void ceph_osds_init(struct ceph_osds *set)
+{
+ set->size = 0;
+ set->primary = -1;
+}
+
+void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);
+
+bool ceph_is_new_interval(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ const struct ceph_osds *old_up,
+ const struct ceph_osds *new_up,
+ int old_size,
+ int new_size,
+ int old_min_size,
+ int new_min_size,
+ u32 old_pg_num,
+ u32 new_pg_num,
+ bool old_sort_bitwise,
+ bool new_sort_bitwise,
+ const struct ceph_pg *pgid);
+bool ceph_osds_changed(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ bool any_change);
+
/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 off, u64 len,
u64 *bno, u64 *oxoff, u64 *oxlen);
-/* calculate mapping of object to a placement group */
-extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
- struct ceph_object_locator *oloc,
- struct ceph_object_id *oid,
- struct ceph_pg *pg_out);
-
-extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
- struct ceph_pg pgid,
- int *osds, int *primary);
-extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
- struct ceph_pg pgid);
+int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid);
+
+void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_osds *up,
+ struct ceph_osds *acting);
+int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid);
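As a rough sketch of how the renamed mapping helpers chain together (osdmap, oid and oloc are assumed to come from the caller):

	struct ceph_pg raw_pgid;
	struct ceph_osds up, acting;
	int ret;

	ret = ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
	if (ret)
		return ret;			/* e.g. the pool does not exist */

	ceph_osds_init(&up);
	ceph_osds_init(&acting);
	ceph_pg_to_up_acting_osds(osdmap, &raw_pgid, &up, &acting);
	/* acting.primary is an OSD id, not an index into acting.osds[] */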
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 2f822dca1046..5c0da61cb763 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -114,8 +114,8 @@ struct ceph_object_layout {
* compound epoch+version, used by storage layer to serialize mutations
*/
struct ceph_eversion {
- __le32 epoch;
__le64 version;
+ __le32 epoch;
} __attribute__ ((packed));
/*
@@ -153,6 +153,11 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */
#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */
+#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */
+#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
+#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
+#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
/*
* The error code to return when an OSD can't handle a write
@@ -389,6 +394,13 @@ enum {
CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */
CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */
+ CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */
+ CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if
+ pool uses pool snaps */
+ CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */
+ CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */
+ CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */
+ CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */
};
enum {
@@ -415,7 +427,17 @@ enum {
CEPH_OSD_CMPXATTR_MODE_U64 = 2
};
-#define RADOS_NOTIFY_VER 1
+enum {
+ CEPH_OSD_WATCH_OP_UNWATCH = 0,
+ CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
+ /* note: use only ODD ids to prevent pre-giant code from
+ interpreting the op as UNWATCH */
+ CEPH_OSD_WATCH_OP_WATCH = 3,
+ CEPH_OSD_WATCH_OP_RECONNECT = 5,
+ CEPH_OSD_WATCH_OP_PING = 7,
+};
+
+const char *ceph_osd_watch_op_name(int o);
/*
* an individual object operation. each may be accompanied by some data
@@ -450,10 +472,14 @@ struct ceph_osd_op {
} __attribute__ ((packed)) snap;
struct {
__le64 cookie;
- __le64 ver;
- __u8 flag; /* 0 = unwatch, 1 = watch */
+ __le64 ver; /* no longer used */
+ __u8 op; /* CEPH_OSD_WATCH_OP_* */
+ __le32 gen; /* registration generation */
} __attribute__ ((packed)) watch;
struct {
+ __le64 cookie;
+ } __attribute__ ((packed)) notify;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task);
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index da95258127aa..0c72204c75fc 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,7 @@
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */
+#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
struct clk;
struct clk_hw;
@@ -282,10 +283,17 @@ extern const struct clk_ops clk_fixed_rate_ops;
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
+struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate);
struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy);
void clk_unregister_fixed_rate(struct clk *clk);
+struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy);
+
void of_fixed_clk_setup(struct device_node *np);
/**
@@ -326,7 +334,12 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
void clk_unregister_gate(struct clk *clk);
+void clk_hw_unregister_gate(struct clk_hw *hw);
struct clk_div_table {
unsigned int val;
@@ -407,12 +420,22 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
+struct clk_hw *clk_hw_register_divider_table(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock);
void clk_unregister_divider(struct clk *clk);
+void clk_hw_unregister_divider(struct clk_hw *hw);
/**
* struct clk_mux - multiplexer clock
@@ -463,14 +486,25 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_mux_flags, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, u32 *table, spinlock_t *lock);
void clk_unregister_mux(struct clk *clk);
+void clk_hw_unregister_mux(struct clk_hw *hw);
void of_fixed_factor_clk_setup(struct device_node *node);
@@ -499,6 +533,10 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
void clk_unregister_fixed_factor(struct clk *clk);
+struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
@@ -533,6 +571,11 @@ struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
+ u8 clk_divider_flags, spinlock_t *lock);
+void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
/**
* struct clk_multiplier - adjustable multiplier clock
@@ -603,6 +646,14 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+void clk_unregister_composite(struct clk *clk);
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
+ const char * const *parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
+void clk_hw_unregister_composite(struct clk_hw *hw);
/***
* struct clk_gpio_gate - gpio gated clock
@@ -625,6 +676,10 @@ extern const struct clk_ops clk_gpio_gate_ops;
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags);
+struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned gpio, bool active_low,
+ unsigned long flags);
+void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
/**
* struct clk_gpio_mux - gpio controlled clock multiplexer
@@ -640,6 +695,10 @@ extern const struct clk_ops clk_gpio_mux_ops;
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags);
+struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags);
+void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
@@ -655,9 +714,15 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
+int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw);
+int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
+
void clk_unregister(struct clk *clk);
void devm_clk_unregister(struct device *dev, struct clk *clk);
+void clk_hw_unregister(struct clk_hw *hw);
+void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
+
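A minimal sketch of a driver adopting the clk_hw-based registration; my_clk_ops, my_hw and the probe function are hypothetical, and the clk_init_data only needs to live across the registration call:

	static struct clk_hw my_hw;

	static int my_clk_probe(struct platform_device *pdev)
	{
		struct clk_init_data init = {
			.name = "my_clk",
			.ops  = &my_clk_ops,	/* driver-provided ops, not shown */
		};

		my_hw.init = &init;

		/* the devm_ variant undoes the registration on driver detach */
		return devm_clk_hw_register(&pdev->dev, &my_hw);
	}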
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
@@ -703,6 +768,11 @@ struct clk_onecell_data {
unsigned int clk_num;
};
+struct clk_hw_onecell_data {
+ size_t num;
+ struct clk_hw *hws[];
+};
+
extern struct of_device_id __clk_of_table;
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
@@ -712,15 +782,24 @@ int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
void *data),
void *data);
+int of_clk_add_hw_provider(struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data);
void of_clk_del_provider(struct device_node *np);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
+struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
+ void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
+struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
+ void *data);
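For a multi-output provider, clk_hw_onecell_data pairs with of_clk_hw_onecell_get; a sketch assuming hypothetical NR_CLKS, names[] and rates[]:

	struct clk_hw_onecell_data *data;
	int i;

	data = devm_kzalloc(&pdev->dev,
			    sizeof(*data) + NR_CLKS * sizeof(data->hws[0]),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->num = NR_CLKS;

	for (i = 0; i < NR_CLKS; i++)
		data->hws[i] = clk_hw_register_fixed_rate(&pdev->dev, names[i],
							  NULL, 0, rates[i]);

	/* consumers address individual outputs with a one-cell clock specifier */
	return of_clk_add_hw_provider(pdev->dev.of_node,
				      of_clk_hw_onecell_get, data);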
unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
const char *of_clk_get_parent_name(struct device_node *np, int index);
-
+int of_clk_detect_critical(struct device_node *np, int index,
+ unsigned long *flags);
void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_OF */
@@ -732,17 +811,34 @@ static inline int of_clk_add_provider(struct device_node *np,
{
return 0;
}
+static inline int of_clk_add_hw_provider(struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ return 0;
+}
static inline void of_clk_del_provider(struct device_node *np) {}
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
+static inline struct clk_hw *
+of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
+{
+ return ERR_PTR(-ENOENT);
+}
static inline struct clk *of_clk_src_onecell_get(
struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
+static inline struct clk_hw *
+of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ return ERR_PTR(-ENOENT);
+}
static inline int of_clk_get_parent_count(struct device_node *np)
{
return 0;
@@ -757,6 +853,11 @@ static inline const char *of_clk_get_parent_name(struct device_node *np,
{
return NULL;
}
+static inline int of_clk_detect_critical(struct device_node *np, int index,
+ unsigned long *flags)
+{
+ return 0;
+}
static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 7adfd80fbf55..ba6fa4148515 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -24,12 +24,20 @@ void r8a7778_clocks_init(u32 mode);
void r8a7779_clocks_init(u32 mode);
void rcar_gen2_clocks_init(u32 mode);
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
void cpg_mstp_add_clk_domain(struct device_node *np);
-int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
-void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#ifdef CONFIG_CLK_RENESAS_CPG_MSTP
+int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev);
#else
-static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#define cpg_mstp_attach_dev NULL
+#define cpg_mstp_detach_dev NULL
#endif
+#ifdef CONFIG_CLK_RENESAS_CPG_MSSR
+int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev);
+void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev);
+#else
+#define cpg_mssr_attach_dev NULL
+#define cpg_mssr_detach_dev NULL
+#endif
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 57bf7aab4516..7007a5f48080 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -121,4 +121,9 @@ static inline void tegra_cpu_clock_resume(void)
}
#endif
+extern void tegra210_xusb_pll_hw_control_enable(void);
+extern void tegra210_xusb_pll_hw_sequence_start(void);
+extern void tegra210_sata_pll_hw_control_enable(void);
+extern void tegra210_sata_pll_hw_sequence_start(void);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index dc5164a6df29..6110fe09ed18 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -37,6 +37,7 @@
* @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
* @max_divider: maximum valid non-bypass divider value (actual)
+ * @max_rate: maximum clock rate for the DPLL
* @modes: possible values of @enable_mask
* @autoidle_reg: register containing the DPLL autoidle mode bitfield
* @idlest_reg: register containing the DPLL idle status bitfield
@@ -81,6 +82,7 @@ struct dpll_data {
u8 last_rounded_n;
u8 min_divider;
u16 max_divider;
+ unsigned long max_rate;
u8 modes;
void __iomem *autoidle_reg;
void __iomem *idlest_reg;
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index c2c04f7cbe8a..2eabc862abdb 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -15,6 +15,7 @@
#include <asm/clkdev.h>
struct clk;
+struct clk_hw;
struct device;
struct clk_lookup {
@@ -34,18 +35,22 @@ struct clk_lookup {
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
const char *dev_fmt, ...) __printf(3, 4);
+struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
const char *dev_fmt, ...) __printf(3, 4);
+struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id,
+ const char *dev_fmt, ...) __printf(3, 4);
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *);
-int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
+int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
#ifdef CONFIG_COMMON_CLK
int __clk_get(struct clk *clk);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a307bf62974f..44a1aff22566 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -15,6 +15,7 @@
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/of.h>
#include <asm/div64.h>
#include <asm/io.h>
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index d7c8de583a23..a58c852a268f 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -2,21 +2,46 @@
#define _LINUX_COMPACTION_H
/* Return values for compact_zone() and try_to_compact_pages() */
-/* compaction didn't start as it was deferred due to past failures */
-#define COMPACT_DEFERRED 0
-/* compaction didn't start as it was not possible or direct reclaim was more suitable */
-#define COMPACT_SKIPPED 1
-/* compaction should continue to another pageblock */
-#define COMPACT_CONTINUE 2
-/* direct compaction partially compacted a zone and there are suitable pages */
-#define COMPACT_PARTIAL 3
-/* The full zone was compacted */
-#define COMPACT_COMPLETE 4
-/* For more detailed tracepoint output */
-#define COMPACT_NO_SUITABLE_PAGE 5
-#define COMPACT_NOT_SUITABLE_ZONE 6
-#define COMPACT_CONTENDED 7
/* When adding new states, please adjust include/trace/events/compaction.h */
+enum compact_result {
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NOT_SUITABLE_ZONE,
+ /*
+ * compaction didn't start as it was not possible or direct reclaim
+ * was more suitable
+ */
+ COMPACT_SKIPPED,
+ /* compaction didn't start as it was deferred due to past failures */
+ COMPACT_DEFERRED,
+
+ /* compaction not active last round */
+ COMPACT_INACTIVE = COMPACT_DEFERRED,
+
+ /* For more detailed tracepoint output - internal to compaction */
+ COMPACT_NO_SUITABLE_PAGE,
+ /* compaction should continue to another pageblock */
+ COMPACT_CONTINUE,
+
+ /*
+	 * The full zone was scanned, but compaction wasn't successful for
+	 * any suitable pages.
+ */
+ COMPACT_COMPLETE,
+ /*
+	 * Direct compaction has scanned part of the zone but wasn't successful
+	 * in compacting suitable pages.
+ */
+ COMPACT_PARTIAL_SKIPPED,
+
+ /* compaction terminated prematurely due to lock contentions */
+ COMPACT_CONTENDED,
+
+ /*
+ * direct compaction partially compacted a zone and there might be
+ * suitable pages
+ */
+ COMPACT_PARTIAL,
+};
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
@@ -38,13 +63,14 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
-extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
- int alloc_flags, const struct alloc_context *ac,
- enum migrate_mode mode, int *contended);
+extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
+ unsigned int order,
+ unsigned int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern unsigned long compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx);
+extern enum compact_result compaction_suitable(struct zone *zone, int order,
+ unsigned int alloc_flags, int classzone_idx);
extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
@@ -52,12 +78,80 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+/* Compaction has made some progress and retrying makes sense */
+static inline bool compaction_made_progress(enum compact_result result)
+{
+ /*
+	 * Even though this might sound confusing, this in fact tells us
+ * that the compaction successfully isolated and migrated some
+ * pageblocks.
+ */
+ if (result == COMPACT_PARTIAL)
+ return true;
+
+ return false;
+}
+
+/* Compaction has failed and it doesn't make much sense to keep retrying. */
+static inline bool compaction_failed(enum compact_result result)
+{
+	/* All zones were scanned completely and still no result. */
+ if (result == COMPACT_COMPLETE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Compaction has backed off for some reason. It might be throttling or
+ * lock contention. Retrying is still worthwhile.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
+ /*
+ * Compaction backed off due to watermark checks for order-0
+ * so the regular reclaim has to try harder and reclaim something.
+ */
+ if (result == COMPACT_SKIPPED)
+ return true;
+
+ /*
+ * If compaction is deferred for high-order allocations, it is
+ * because sync compaction recently failed. If this is the case
+ * and the caller requested a THP allocation, we do not want
+ * to heavily disrupt the system, so we fail the allocation
+ * instead of entering direct reclaim.
+ */
+ if (result == COMPACT_DEFERRED)
+ return true;
+
+ /*
+ * If compaction in async mode encounters contention or blocks higher
+ * priority task we back off early rather than cause stalls.
+ */
+ if (result == COMPACT_CONTENDED)
+ return true;
+
+ /*
+	 * The page scanners have met, but full zones haven't been scanned,
+	 * so this is in fact a back off.
+ */
+ if (result == COMPACT_PARTIAL_SKIPPED)
+ return true;
+
+ return false;
+}
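A sketch of how an allocator retry loop might consult these predicates (illustrative only, not the actual page allocator logic):

	static bool should_retry_compaction(enum compact_result result,
					    int retries, int max_retries)
	{
		if (compaction_made_progress(result))
			return true;			/* it is working, keep going */
		if (compaction_failed(result))
			return false;			/* whole zones scanned, no luck */
		if (compaction_withdrawn(result))
			return retries < max_retries;	/* backed off, bounded retries */
		return false;
	}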
+
+
+bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
+ int alloc_flags);
+
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
#else
-static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
+static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
const struct alloc_context *ac,
enum migrate_mode mode, int *contended)
@@ -73,7 +167,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline unsigned long compaction_suitable(struct zone *zone, int order,
+static inline enum compact_result compaction_suitable(struct zone *zone, int order,
int alloc_flags, int classzone_idx)
{
return COMPACT_SKIPPED;
@@ -88,6 +182,21 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline bool compaction_made_progress(enum compact_result result)
+{
+ return false;
+}
+
+static inline bool compaction_failed(enum compact_result result)
+{
+ return false;
+}
+
+static inline bool compaction_withdrawn(enum compact_result result)
+{
+ return true;
+}
+
static inline int kcompactd_run(int nid)
{
return 0;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index eeae401a2412..e2949397c19b 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -142,6 +142,7 @@
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
+#define __malloc __attribute__((__malloc__))
#endif
#if GCC_VERSION >= 40000
@@ -246,7 +247,7 @@
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b5ff9881bef8..793c0829e3a3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -357,6 +357,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define __deprecated_for_modules
#endif
+#ifndef __malloc
+#define __malloc
+#endif
+
/*
* Allow us to avoid 'defined but not used' warnings on functions and data,
* as well as force them to be emitted to the assembly file.
diff --git a/include/linux/console.h b/include/linux/console.h
index ea731af2451e..98c8615dc300 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -47,7 +47,7 @@ struct consw {
int (*con_font_copy)(struct vc_data *, int);
int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
unsigned int);
- int (*con_set_palette)(struct vc_data *, unsigned char *);
+ int (*con_set_palette)(struct vc_data *, const unsigned char *);
int (*con_scrolldelta)(struct vc_data *, int);
int (*con_set_origin)(struct vc_data *);
void (*con_save_screen)(struct vc_data *);
@@ -191,6 +191,8 @@ void vcs_remove_sysfs(int index);
#ifdef CONFIG_VGA_CONSOLE
extern bool vgacon_text_force(void);
+#else
+static inline bool vgacon_text_force(void) { return false; }
#endif
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
new file mode 100644
index 000000000000..a978bb85599a
--- /dev/null
+++ b/include/linux/coresight-stm.h
@@ -0,0 +1,6 @@
+#ifndef __LINUX_CORESIGHT_STM_H_
+#define __LINUX_CORESIGHT_STM_H_
+
+#include <uapi/linux/coresight-stm.h>
+
+#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f9b1fab4388a..21597dcac0e2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,25 +59,7 @@ struct notifier_block;
* CPU notifier priorities.
*/
enum {
- /*
- * SCHED_ACTIVE marks a cpu which is coming up active during
- * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
- * notifier. CPUSET_ACTIVE adjusts cpuset according to
- * cpu_active mask right after SCHED_ACTIVE. During
- * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
- * ordered in the similar way.
- *
- * This ordering guarantees consistent cpu_active mask and
- * migration behavior to all cpu notifiers.
- */
- CPU_PRI_SCHED_ACTIVE = INT_MAX,
- CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
- CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
- CPU_PRI_CPUSET_INACTIVE = INT_MIN,
-
- /* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
- CPU_PRI_MIGRATION = 10,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
deleted file mode 100644
index 0414009e2c30..000000000000
--- a/include/linux/cpufreq-dt.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2014 Marvell
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CPUFREQ_DT_H__
-#define __CPUFREQ_DT_H__
-
-struct cpufreq_dt_platform_data {
- /*
- * True when each CPU has its own clock to control its
- * frequency, false when all CPUs are controlled by a single
- * clock.
- */
- bool independent_clocks;
-};
-
-#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 718e8725de8a..4e81e08db752 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -102,6 +102,17 @@ struct cpufreq_policy {
*/
struct rw_semaphore rwsem;
+ /*
+ * Fast switch flags:
+ * - fast_switch_possible should be set by the driver if it can
+	 *   guarantee that the frequency can be changed on any CPU sharing the
+	 *   policy and that the change will then affect all of the policy's CPUs.
+	 * - fast_switch_enabled is to be set by governors that support fast
+	 *   frequency switching with the help of cpufreq_enable_fast_switch().
+ */
+ bool fast_switch_possible;
+ bool fast_switch_enabled;
+
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
@@ -156,6 +167,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -236,6 +249,8 @@ struct cpufreq_driver {
unsigned int relation); /* Deprecated */
int (*target_index)(struct cpufreq_policy *policy,
unsigned int index);
+ unsigned int (*fast_switch)(struct cpufreq_policy *policy,
+ unsigned int target_freq);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
@@ -426,6 +441,20 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * ondemand governor will work on any processor with transition latency <= 10ms,
+ * using an appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * the ondemand governor will not work. All times here are in us (microseconds).
+ */
+#define MIN_SAMPLING_RATE_RATIO (2)
+#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (20)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
/* Governor Events */
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
@@ -450,6 +479,8 @@ struct cpufreq_governor {
};
/* Pass a target to the cpufreq driver */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq);
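A sketch of how a governor might use the fast switch path from a non-sleeping context; policy and next_freq are assumed to come from the governor:

	/* during governor start: opt in if the driver set fast_switch_possible */
	cpufreq_enable_fast_switch(policy);

	/* hot path, e.g. a scheduler callback; must not sleep */
	if (policy->fast_switch_enabled) {
		unsigned int freq;

		freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (freq)
			policy->cur = freq;	/* frequency the driver actually set */
	}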
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -462,6 +493,29 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+/* Governor attribute set */
+struct gov_attr_set {
+ struct kobject kobj;
+ struct list_head policy_list;
+ struct mutex update_lock;
+ int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+ ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+ size_t count);
+};
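A sketch of one governor tunable built on top of gov_attr_set; my_gov_tunables is a hypothetical per-governor structure embedding the attr_set:

	struct my_gov_tunables {
		struct gov_attr_set	attr_set;
		unsigned int		rate_limit_us;
	};

	static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
	{
		struct my_gov_tunables *t =
			container_of(attr_set, struct my_gov_tunables, attr_set);

		return sprintf(buf, "%u\n", t->rate_limit_us);
	}

	static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set,
					   const char *buf, size_t count)
	{
		struct my_gov_tunables *t =
			container_of(attr_set, struct my_gov_tunables, attr_set);

		if (kstrtouint(buf, 10, &t->rate_limit_us))
			return -EINVAL;
		return count;
	}

	static struct governor_attr rate_limit_us =
		__ATTR(rate_limit_us, 0644, rate_limit_us_show, rate_limit_us_store);

The governor's kobj_type would then point its sysfs_ops at governor_sysfs_ops and list rate_limit_us.attr among its default attributes.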
+
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 5d68e15e46b7..386374d19987 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_state {
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_SCHED_STARTING,
CPUHP_AP_NOTIFY_STARTING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
@@ -16,6 +17,7 @@ enum cpuhp_state {
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 40cee6b77a93..e828cf65d7df 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -743,12 +743,10 @@ set_cpu_present(unsigned int cpu, bool present)
static inline void
set_cpu_online(unsigned int cpu, bool online)
{
- if (online) {
+ if (online)
cpumask_set_cpu(cpu, &__cpu_online_mask);
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- } else {
+ else
cpumask_clear_cpu(cpu, &__cpu_online_mask);
- }
}
static inline void
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..bfc204e70338 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -16,26 +16,26 @@
#ifdef CONFIG_CPUSETS
-extern struct static_key cpusets_enabled_key;
+extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
- return static_key_false(&cpusets_enabled_key);
+ return static_branch_unlikely(&cpusets_enabled_key);
}
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
- return static_key_count(&cpusets_enabled_key) + 1;
+ return static_key_count(&cpusets_enabled_key.key) + 1;
}
static inline void cpuset_inc(void)
{
- static_key_slow_inc(&cpusets_enabled_key);
+ static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
- static_key_slow_dec(&cpusets_enabled_key);
+ static_branch_dec(&cpusets_enabled_key);
}
extern int cpuset_init(void);
@@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
+extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
+ if (cpusets_enabled())
+ return __cpuset_node_allowed(node, gfp_mask);
+ return true;
}
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+}
+
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ if (cpusets_enabled())
+ return __cpuset_zone_allowed(z, gfp_mask);
+ return true;
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -137,8 +146,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -174,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return 1;
+ return true;
}
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return 1;
+ return true;
+}
+
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ return true;
}
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -245,10 +257,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3849fce7ecfe..3873697ba21c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -34,9 +34,13 @@ void vmcore_cleanup(void);
/*
* Architecture code can redefine this if there are any special checks
- * needed for 64-bit ELF vmcores. In case of 32-bit only architecture,
- * this can be set to zero.
+ * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit
+ * only architecture, vmcore_elf64_check_arch can be set to zero.
*/
+#ifndef vmcore_elf32_check_arch
+#define vmcore_elf32_check_arch(x) elf_check_arch(x)
+#endif
+
#ifndef vmcore_elf64_check_arch
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 99c94899ad0f..6e28c895c376 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -948,8 +948,7 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
* encrypt and decrypt API calls. During the allocation, the provided ablkcipher
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ablkcipher_request *ablkcipher_request_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 636dd59ab505..43d5f0b799c7 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -3,45 +3,62 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/radix-tree.h>
#include <asm/pgtable.h>
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+/* We use lowest available exceptional entry bit for locking */
+#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
get_block_t, dio_iodone_t, int flags);
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+ pgoff_t index, bool wake_all);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+ unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
sector_t n)
{
return ERR_PTR(-ENXIO);
}
+/* Shouldn't ever be called when dax is disabled. */
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ pgoff_t index)
+{
+ BUG();
+}
+static inline int __dax_zero_page_range(struct block_device *bdev,
+ sector_t sector, unsigned int offset, unsigned int length)
+{
+ return -ENXIO;
+}
#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t, dax_iodone_t);
+ unsigned int flags, get_block_t);
int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t, dax_iodone_t);
+ unsigned int flags, get_block_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, unsigned int flags, get_block_t gb,
- dax_iodone_t di)
+ pmd_t *pmd, unsigned int flags, get_block_t gb)
{
return VM_FAULT_FALLBACK;
}
#define __dax_pmd_fault dax_pmd_fault
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
+#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
+#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4bb4de8d95ea..484c8792da82 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>
+#include <linux/stringhash.h>
struct path;
struct vfsmount;
@@ -52,9 +53,6 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define hashlen_hash(hashlen) ((u32) (hashlen))
-#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
-#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
struct dentry_stat_t {
long nr_dentry;
@@ -65,29 +63,6 @@ struct dentry_stat_t {
};
extern struct dentry_stat_t dentry_stat;
-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash() 0
-
-/* partial hash update function. Assume roughly 4 bits per character */
-static inline unsigned long
-partial_name_hash(unsigned long c, unsigned long prevhash)
-{
- return (prevhash + (c << 4) + (c >> 4)) * 11;
-}
-
-/*
- * Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
- */
-static inline unsigned long end_name_hash(unsigned long hash)
-{
- return (unsigned int) hash;
-}
-
-/* Compute the hash for a name string. */
-extern unsigned int full_name_hash(const unsigned char *, unsigned int);
-
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
* give reasonable cacheline footprint with larger lines without the
@@ -123,7 +98,10 @@ struct dentry {
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
- struct list_head d_lru; /* LRU list */
+ union {
+ struct list_head d_lru; /* LRU list */
+ wait_queue_head_t *d_wait; /* in-lookup ones only */
+ };
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
/*
@@ -131,6 +109,7 @@ struct dentry {
*/
union {
struct hlist_node d_alias; /* inode alias list */
+ struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
};
@@ -232,6 +211,8 @@ struct dentry_operations {
#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
#define DCACHE_OP_REAL 0x08000000
+#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
+
extern seqlock_t rename_lock;
/*
@@ -248,6 +229,8 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+ wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
@@ -367,6 +350,22 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
+extern void __d_lookup_done(struct dentry *);
+
+static inline int d_in_lookup(struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_PAR_LOOKUP;
+}
+
+static inline void d_lookup_done(struct dentry *dentry)
+{
+ if (unlikely(d_in_lookup(dentry))) {
+ spin_lock(&dentry->d_lock);
+ __d_lookup_done(dentry);
+ spin_unlock(&dentry->d_lock);
+ }
+}
+
extern void dput(struct dentry *);
static inline bool d_managed(const struct dentry *dentry)
@@ -565,4 +564,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
return dentry;
}
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+ unsigned open_flags)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+ inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+ return inode;
+}
+
+
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 981e53ab84e8..1438e2322d5c 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -19,9 +19,11 @@
#include <linux/seq_file.h>
#include <linux/types.h>
+#include <linux/compiler.h>
struct device;
struct file_operations;
+struct srcu_struct;
struct debugfs_blob_wrapper {
void *data;
@@ -41,14 +43,16 @@ struct debugfs_regset32 {
extern struct dentry *arch_debugfs_dir;
-#if defined(CONFIG_DEBUG_FS)
+extern struct srcu_struct debugfs_srcu;
-/* declared over in file.c */
-extern const struct file_operations debugfs_file_operations;
+#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
+struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
@@ -68,6 +72,31 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
void debugfs_remove_recursive(struct dentry *dentry);
+int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
+ __acquires(&debugfs_srcu);
+
+void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+
+ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
+
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+ .write = debugfs_attr_write, \
+ .llseek = generic_file_llseek, \
+}
+
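A sketch of the intended conversion from DEFINE_SIMPLE_ATTRIBUTE: the generated fops read and write through debugfs_attr_read/write (which take debugfs_srcu), so the file can be created with the _unsafe variant; my_count is a hypothetical u32:

	static int my_count_get(void *data, u64 *val)
	{
		*val = *(u32 *)data;
		return 0;
	}

	static int my_count_set(void *data, u64 val)
	{
		*(u32 *)data = val;
		return 0;
	}

	DEFINE_DEBUGFS_ATTRIBUTE(fops_my_count, my_count_get, my_count_set, "%llu\n");

	/* _unsafe is fine here because the generated fops are removal-aware */
	debugfs_create_file_unsafe("my_count", 0644, parent, &my_count,
				   &fops_my_count);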
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -176,6 +205,20 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline int debugfs_use_file_start(const struct dentry *dentry,
+ int *srcu_idx)
+ __acquires(&debugfs_srcu)
+{
+ return 0;
+}
+
+static inline void debugfs_use_file_finish(int srcu_idx)
+ __releases(&debugfs_srcu)
+{ }
+
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ static const struct file_operations __fops = { 0 }
+
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
{
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 98ffcbd4888e..46056cb161fc 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -38,8 +38,10 @@ struct debug_obj {
  * @name:		name of the object type
  * @debug_hint:	function returning address, which has an associated
  *			kernel symbol, to allow identifying the object
+ * @is_static_object:	return true if the obj is static, otherwise return false
* @fixup_init: fixup function, which is called when the init check
- * fails
+ * fails. All fixup functions must return true if fixup
+ * was successful, otherwise return false
* @fixup_activate: fixup function, which is called when the activate check
* fails
* @fixup_destroy: fixup function, which is called when the destroy check
@@ -51,12 +53,13 @@ struct debug_obj {
*/
struct debug_obj_descr {
const char *name;
- void *(*debug_hint) (void *addr);
- int (*fixup_init) (void *addr, enum debug_obj_state state);
- int (*fixup_activate) (void *addr, enum debug_obj_state state);
- int (*fixup_destroy) (void *addr, enum debug_obj_state state);
- int (*fixup_free) (void *addr, enum debug_obj_state state);
- int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
+ void *(*debug_hint)(void *addr);
+ bool (*is_static_object)(void *addr);
+ bool (*fixup_init)(void *addr, enum debug_obj_state state);
+ bool (*fixup_activate)(void *addr, enum debug_obj_state state);
+ bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
+ bool (*fixup_free)(void *addr, enum debug_obj_state state);
+ bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
#ifdef CONFIG_DEBUG_OBJECTS
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index c0a360e99f64..269521f143ac 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -1,3 +1,22 @@
+/*
+ * This file is provided under the GPLv2 license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
#ifndef __DEVCOREDUMP_H
#define __DEVCOREDUMP_H
@@ -5,17 +24,62 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+/*
+ * _devcd_free_sgtable - free all the memory of the given scatterlist table
+ * (i.e. both pages and scatterlist instances)
+ * NOTE: if two tables are allocated and chained using the sg_chain function,
+ * then this function should be called only once, on the first table
+ * @table: pointer to sg_table to free
+ */
+static inline void _devcd_free_sgtable(struct scatterlist *table)
+{
+ int i;
+ struct page *page;
+ struct scatterlist *iter;
+ struct scatterlist *delete_iter;
+
+ /* free pages */
+ iter = table;
+ for_each_sg(table, iter, sg_nents(table), i) {
+ page = sg_page(iter);
+ if (page)
+ __free_page(page);
+ }
+
+ /* then free all chained tables */
+ iter = table;
+	delete_iter = table;	/* always points to the head of a table */
+ while (!sg_is_last(iter)) {
+ iter++;
+ if (sg_is_chain(iter)) {
+ iter = sg_chain_ptr(iter);
+ kfree(delete_iter);
+ delete_iter = iter;
+ }
+ }
+
+ /* free the last table */
+ kfree(delete_iter);
+}
+
+
#ifdef CONFIG_DEV_COREDUMP
-void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
+void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp);
void dev_coredumpm(struct device *dev, struct module *owner,
- const void *data, size_t datalen, gfp_t gfp,
+ void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen),
- void (*free)(const void *data));
+ void *data, size_t datalen),
+ void (*free)(void *data));
+
+void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+ size_t datalen, gfp_t gfp);
#else
-static inline void dev_coredumpv(struct device *dev, const void *data,
+static inline void dev_coredumpv(struct device *dev, void *data,
size_t datalen, gfp_t gfp)
{
vfree(data);
@@ -23,13 +87,19 @@ static inline void dev_coredumpv(struct device *dev, const void *data,
static inline void
dev_coredumpm(struct device *dev, struct module *owner,
- const void *data, size_t datalen, gfp_t gfp,
+ void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen),
- void (*free)(const void *data))
+ void *data, size_t datalen),
+ void (*free)(void *data))
{
free(data);
}
+
+static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+ size_t datalen, gfp_t gfp)
+{
+ _devcd_free_sgtable(table);
+}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* __DEVCOREDUMP_H */
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6fa02a20eb63..2de4e2eea180 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -19,6 +19,13 @@
#define DEVFREQ_NAME_LEN 16
+/* DEVFREQ notifier interface */
+#define DEVFREQ_TRANSITION_NOTIFIER (0)
+
+/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */
+#define DEVFREQ_PRECHANGE (0)
+#define DEVFREQ_POSTCHANGE (1)
+
struct devfreq;
/**
@@ -143,6 +150,7 @@ struct devfreq_governor {
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
* @last_stat_updated: The last time stat updated
+ * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifiers
*
  * This structure stores the devfreq information for a given device.
*
@@ -177,6 +185,13 @@ struct devfreq {
unsigned int *trans_table;
unsigned long *time_in_state;
unsigned long last_stat_updated;
+
+ struct srcu_notifier_head transition_notifier_list;
+};
+
+struct devfreq_freqs {
+ unsigned long old;
+ unsigned long new;
};
#if defined(CONFIG_PM_DEVFREQ)
@@ -207,6 +222,22 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq);
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
+extern int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ int index);
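A sketch of a consumer registering for frequency transition events; dev, devfreq and ret are assumed to come from the calling driver:

	static int my_devfreq_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct devfreq_freqs *freqs = ptr;

		switch (event) {
		case DEVFREQ_PRECHANGE:
			/* about to move from freqs->old to freqs->new */
			break;
		case DEVFREQ_POSTCHANGE:
			/* transition to freqs->new completed */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = { .notifier_call = my_devfreq_event };

	/* managed registration, undone automatically on driver detach */
	ret = devm_devfreq_register_notifier(dev, devfreq, &my_nb,
					     DEVFREQ_TRANSITION_NOTIFIER);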
/**
* devfreq_update_stats() - update the last_status pointer in struct devfreq
@@ -241,6 +272,39 @@ struct devfreq_simple_ondemand_data {
};
#endif
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+/**
+ * struct devfreq_passive_data - void *data fed to struct devfreq
+ * and devfreq_add_device
+ * @parent: the devfreq instance of parent device.
+ * @get_target_freq:	Optional callback that returns the desired operating
+ *			frequency for the device using the passive governor.
+ *			It is called when the passive governor has to decide
+ *			the next frequency from the new frequency of the parent
+ *			devfreq device, which uses a governor other than the
+ *			passive governor. If the devfreq device has a specific
+ *			method to decide the next frequency, use this callback.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ *
+ * Users of devfreq_passive_data have to set @parent to the devfreq instance
+ * of the parent device, which uses a governor other than the passive
+ * governor. The @this and @nb fields don't need to be initialized; the
+ * devfreq core handles them.
+ */
+struct devfreq_passive_data {
+	/* Must be set to the devfreq instance of the parent device */
+ struct devfreq *parent;
+
+	/* Optional callback to decide the next frequency of the passive device */
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+
+ /* For passive governor's internal use. Don't need to set them */
+ struct devfreq *this;
+ struct notifier_block nb;
+};
+#endif
+
#else /* !CONFIG_PM_DEVFREQ */
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
@@ -307,6 +371,41 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
{
}
+static inline int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+}
+
+static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline int devfreq_update_stats(struct devfreq *df)
{
return -EINVAL;
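
For context, a minimal consumer of the new DEVFREQ_TRANSITION_NOTIFIER interface could look like the sketch below; the callback body and the way the devfreq handle is obtained are illustrative assumptions, not part of this patch.

#include <linux/devfreq.h>
#include <linux/notifier.h>

static int my_devfreq_transition(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct devfreq_freqs *freqs = data;

	switch (event) {
	case DEVFREQ_PRECHANGE:
		/* prepare for the switch from freqs->old to freqs->new */
		break;
	case DEVFREQ_POSTCHANGE:
		/* react once freqs->new is in effect */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_devfreq_transition,
};

/* in probe(), with a handle from e.g. devfreq_get_devfreq_by_phandle(dev, 0): */
/* devm_devfreq_register_notifier(dev, devfreq, &my_nb, DEVFREQ_TRANSITION_NOTIFIER); */
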
diff --git a/include/linux/device.h b/include/linux/device.h
index 002c59728dbe..38f02814d53a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
#ifdef CONFIG_DEBUG_DEVRES
extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name);
+ int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
#else
extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid);
+ int nid) __malloc;
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
@@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id);
extern int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
extern __printf(3, 0)
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap);
+ va_list ap) __malloc;
extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev,
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
extern void devm_kfree(struct device *dev, void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp);
@@ -956,11 +956,6 @@ static inline bool device_async_suspend_enabled(struct device *dev)
return !!dev->power.async_suspend;
}
-static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
-{
- dev->power.ignore_children = enable;
-}
-
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
@@ -1293,8 +1288,11 @@ do { \
dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
-#define dev_dbg_ratelimited(dev, fmt, ...) \
- no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+} while (0)
#endif
#ifdef VERBOSE_DEBUG
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 358a4db72a27..277ab9af9ac2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,23 +15,22 @@
#include <linux/errno.h>
-struct pts_fs_info;
-
#ifdef CONFIG_UNIX98_PTYS
-/* Look up a pts fs info and get a ref to it */
-struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
-void devpts_put_ref(struct pts_fs_info *);
+struct pts_fs_info;
+
+struct pts_fs_info *devpts_acquire(struct file *);
+void devpts_release(struct pts_fs_info *);
int devpts_new_index(struct pts_fs_info *);
void devpts_kill_index(struct pts_fs_info *, int);
/* mknod in devpts */
-struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
/* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
/* unlink */
-void devpts_pty_kill(struct inode *inode);
+void devpts_pty_kill(struct dentry *);
#endif
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3fe90d494edb..4551c6f2a6c4 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -112,19 +112,24 @@ struct dma_buf_ops {
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
+ * @vmapping_counter: used internally to refcnt the vmaps
+ * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
+ * @poll: for userspace poll support
+ * @cb_excl: for userspace poll support
+ * @cb_shared: for userspace poll support
*/
struct dma_buf {
size_t size;
struct file *file;
struct list_head attachments;
const struct dma_buf_ops *ops;
- /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
struct mutex lock;
unsigned vmapping_counter;
void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
/**
* helper macro for exporters; zeros and fills in most common values
+ *
+ * @name: export-info name
*/
-#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
- struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+#define DEFINE_DMA_BUF_EXPORT_INFO(name) \
+ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
.owner = THIS_MODULE }
/**
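
As a usage note for the renamed macro parameter above, an exporter typically declares the export info with DEFINE_DMA_BUF_EXPORT_INFO() and fills in the remaining fields before calling dma_buf_export(); the my_obj structure, my_dmabuf_ops and the field values below are placeholders.

static int my_export(struct my_obj *obj)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	exp_info.ops   = &my_dmabuf_ops;	/* hypothetical exporter ops */
	exp_info.size  = obj->size;		/* hypothetical backing object */
	exp_info.flags = O_RDWR;
	exp_info.priv  = obj;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj->dmabuf = dmabuf;
	return 0;
}
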
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc481037478a..8443bbb5c071 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9ea9aba28049..71c1b215ef66 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, struct iommu_ops *iommu,
+ u64 size, const struct iommu_ops *iommu,
bool coherent) { }
#endif
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index 71456442ebe3..f2e538aaddad 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -27,6 +27,7 @@ struct dw_dma;
* @regs: memory mapped I/O space
* @clk: hclk clock
* @dw: struct dw_dma that is filled by dw_dma_probe()
+ * @pdata: pointer to platform data
*/
struct dw_dma_chip {
struct device *dev;
@@ -34,10 +35,12 @@ struct dw_dma_chip {
void __iomem *regs;
struct clk *clk;
struct dw_dma *dw;
+
+ const struct dw_dma_platform_data *pdata;
};
/* Export to the platform drivers */
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
/* DMA API extensions */
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..3ae300052553 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -41,6 +41,20 @@ struct xilinx_vdma_config {
int ext_fsync;
};
+/**
+ * enum xdma_ip_type - DMA IP type.
+ *
+ * @XDMA_TYPE_AXIDMA: AXI DMA IP.
+ * @XDMA_TYPE_CDMA: AXI CDMA IP.
+ * @XDMA_TYPE_VDMA: AXI VDMA IP.
+ *
+ */
+enum xdma_ip_type {
+ XDMA_TYPE_AXIDMA = 0,
+ XDMA_TYPE_CDMA,
+ XDMA_TYPE_VDMA,
+};
+
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
struct xilinx_vdma_config *cfg);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 017433712833..30de0197263a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
sg_dma_address(&sg) = buf;
sg_dma_len(&sg) = len;
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
}
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, NULL);
}
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
enum dma_transfer_direction dir, unsigned long flags,
struct rio_dma_ext *rio_ext)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, rio_ext);
}
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+ return NULL;
+
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
period_len, dir, flags);
}
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+ return NULL;
+
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
{
- if (!chan || !chan->device)
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
return NULL;
return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
src_sg, src_nents, flags);
}
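
With the added NULL checks, the prep wrappers now return NULL instead of crashing when a channel's driver does not implement the requested operation, so callers should keep treating a NULL descriptor as "not supported". A typical call site might look like the sketch below; my_dma_complete and the function framing are illustrative only.

#include <linux/dmaengine.h>

static int my_start_tx(struct dma_chan *chan, dma_addr_t buf_dma, size_t len,
		       void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;		/* channel cannot do slave transfers */

	desc->callback = my_dma_complete;	/* hypothetical completion handler */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
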
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 1626474567ac..c2db3ca22217 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -21,6 +21,8 @@
#include <linux/pfn.h>
#include <linux/pstore.h>
#include <linux/reboot.h>
+#include <linux/uuid.h>
+#include <linux/screen_info.h>
#include <asm/page.h>
@@ -43,17 +45,10 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
-
-typedef struct {
- u8 b[16];
-} efi_guid_t;
+typedef uuid_le efi_guid_t;
#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
-((efi_guid_t) \
-{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
- (b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, \
- (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+ UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
/*
* Generic EFI table header
@@ -124,6 +119,13 @@ typedef struct {
} efi_capsule_header_t;
/*
+ * EFI capsule flags
+ */
+#define EFI_CAPSULE_PERSIST_ACROSS_RESET 0x00010000
+#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000
+#define EFI_CAPSULE_INITIATE_RESET 0x00040000
+
+/*
* Allocation types for calls to boottime->allocate_pages.
*/
#define EFI_ALLOCATE_ANY_PAGES 0
@@ -282,9 +284,10 @@ typedef struct {
efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
- void *locate_handle;
+ efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
+ unsigned long *, efi_handle_t *);
void *locate_device_path;
- void *install_configuration_table;
+ efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
void *load_image;
void *start_image;
void *exit;
@@ -623,6 +626,27 @@ void efi_native_runtime_setup(void);
EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
+ EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
+ 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+
+#define EFI_CONSOLE_OUT_DEVICE_GUID \
+ EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
+ 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+
+/*
+ * This GUID is used to pass to the kernel proper the struct screen_info
+ * that was populated by the stub based on the GOP protocol instance
+ * associated with ConOut.
+ */
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
+ EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
+ 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
+#define LINUX_EFI_LOADER_ENTRY_GUID \
+ EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
+ 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -847,6 +871,14 @@ typedef struct {
#define EFI_INVALID_TABLE_ADDR (~0UL)
+typedef struct {
+ u32 version;
+ u32 num_entries;
+ u32 desc_size;
+ u32 reserved;
+ efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
/*
* All runtime access to EFI goes through this structure:
*/
@@ -868,6 +900,7 @@ extern struct efi {
unsigned long config_table; /* config tables */
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
+ unsigned long mem_attr_table; /* memory attributes table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -883,7 +916,7 @@ extern struct efi {
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map *memmap;
+ struct efi_memory_map memmap;
unsigned long flags;
} efi;
@@ -945,7 +978,6 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
-extern struct efi_memory_map memmap;
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
@@ -957,12 +989,34 @@ extern void __init efi_fake_memmap(void);
static inline void efi_fake_memmap(void) { }
#endif
+/*
+ * efi_memattr_perm_setter - arch specific callback function passed into
+ * efi_memattr_apply_permissions() that updates the
+ * mapping permissions described by the second
+ * argument in the page tables referred to by the
+ * first argument.
+ */
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+extern int efi_memattr_init(void);
+extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn);
+
/* Iterate through an efi_memory_map */
-#define for_each_efi_memory_desc(m, md) \
+#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
(md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
(md) = (void *)(md) + (m)->desc_size)
+/**
+ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
+ * @md: the efi_memory_desc_t * iterator
+ *
+ * Once the loop finishes @md must not be accessed.
+ */
+#define for_each_efi_memory_desc(md) \
+ for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
/*
* Format an EFI memory descriptor's type and attributes to a user-provided
* character buffer, as per snprintf(), and return the buffer.
@@ -1000,7 +1054,6 @@ extern int __init efi_setup_pcdp_console(char *);
* possible, remove EFI-related code altogether.
*/
#define EFI_BOOT 0 /* Were we booted from EFI? */
-#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
@@ -1026,8 +1079,16 @@ static inline bool efi_enabled(int feature)
}
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+
+static inline bool
+efi_capsule_pending(int *reset_type)
+{
+ return false;
+}
#endif
+extern int efi_status_to_err(efi_status_t status);
+
/*
* Variable Attributes
*/
@@ -1050,7 +1111,7 @@ efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
* Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
* not including trailing NUL
*/
-#define EFI_VARIABLE_GUID_LEN 36
+#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN
/*
* The type of search to perform when calling boottime->locate_handle
@@ -1180,6 +1241,80 @@ struct efi_simple_text_output_protocol {
void *test_string;
};
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
+#define PIXEL_BIT_MASK 2
+#define PIXEL_BLT_ONLY 3
+#define PIXEL_FORMAT_MAX 4
+
+struct efi_pixel_bitmask {
+ u32 red_mask;
+ u32 green_mask;
+ u32 blue_mask;
+ u32 reserved_mask;
+};
+
+struct efi_graphics_output_mode_info {
+ u32 version;
+ u32 horizontal_resolution;
+ u32 vertical_resolution;
+ int pixel_format;
+ struct efi_pixel_bitmask pixel_information;
+ u32 pixels_per_scan_line;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_32 {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+ u32 max_mode;
+ u32 mode;
+ u64 info;
+ u64 size_of_info;
+ u64 frame_buffer_base;
+ u64 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode {
+ u32 max_mode;
+ u32 mode;
+ unsigned long info;
+ unsigned long size_of_info;
+ u64 frame_buffer_base;
+ unsigned long frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_32 {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+ u64 query_mode;
+ u64 set_mode;
+ u64 blt;
+ u64 mode;
+};
+
+struct efi_graphics_output_protocol {
+ unsigned long query_mode;
+ unsigned long set_mode;
+ unsigned long blt;
+ struct efi_graphics_output_protocol_mode *mode;
+};
+
+typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
+ struct efi_graphics_output_protocol *, u32, unsigned long *,
+ struct efi_graphics_output_mode_info **);
+
extern struct list_head efivar_sysfs_list;
static inline void
@@ -1195,8 +1330,7 @@ int efivars_unregister(struct efivars *efivars);
struct kobject *efivars_kobject(void);
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool atomic, bool duplicates,
- struct list_head *head);
+ void *data, bool duplicates, struct list_head *head);
void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
void efivar_entry_remove(struct efivar_entry *entry);
@@ -1242,6 +1376,13 @@ int efivars_sysfs_init(void);
#define EFIVARS_DATA_SIZE_MAX 1024
#endif /* CONFIG_EFI_VARS */
+extern bool efi_capsule_pending(int *reset_type);
+
+extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
+ size_t size, int *reset);
+
+extern int efi_capsule_update(efi_capsule_header_t *capsule,
+ struct page **pages);
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
@@ -1319,5 +1460,9 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+ struct screen_info *si, efi_guid_t *proto,
+ unsigned long size);
+
bool efi_runtime_disabled(void);
#endif /* _LINUX_EFI_H */
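
To illustrate the iterator change: code that used to pass an explicit map to for_each_efi_memory_desc(m, md) can now walk efi.memmap directly with the single-argument form. The runtime-memory filter below is only an example of such a walk.

static u64 count_runtime_pages(void)
{
	efi_memory_desc_t *md;
	u64 runtime_pages = 0;

	for_each_efi_memory_desc(md) {
		if (md->attribute & EFI_MEMORY_RUNTIME)
			runtime_pages += md->num_pages;
	}
	/* per the kernel-doc above, md must not be used after the loop */
	return runtime_pages;
}
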
diff --git a/include/linux/err.h b/include/linux/err.h
index 56762ab41713..1e3558845e4c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -18,7 +18,7 @@
#ifndef __ASSEMBLY__
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
static inline void * __must_check ERR_PTR(long error)
{
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 89627b9187f9..7ce9fb1b7d28 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,5 +28,6 @@
#define EBADTYPE 527 /* Type not supported by server */
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
+#define ERECALLCONFLICT 530 /* conflict with recalled state */
#endif
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e2b7bf27c03e..9ded8c6d8176 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -150,6 +150,13 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+ u32 legacy_u32);
+
+/* Return false if src had higher bits set; lower bits are always updated. */
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+ const unsigned long *src);
+
/**
* struct ethtool_ops - optional netdev operations
* @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
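
The two conversion helpers bridge the legacy u32 link masks and the link-mode bitmaps used by get_link_ksettings/set_link_ksettings; a driver migrating its advertising handling might use them roughly as follows. The function framing and ADVERTISED_1000baseT_Full value are examples, not from this patch.

#include <linux/ethtool.h>

static void my_set_advertising(struct ethtool_link_ksettings *cmd)
{
	u32 legacy_advertising = ADVERTISED_1000baseT_Full;	/* example value */
	bool exact;

	/* legacy u32 -> new bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						legacy_advertising);

	/* new bitmap -> legacy u32; false means some high bits were lost */
	exact = ethtool_convert_link_mode_to_legacy_u32(&legacy_advertising,
							cmd->link_modes.advertising);
	(void)exact;
}
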
diff --git a/include/linux/export.h b/include/linux/export.h
index 96e45ea463e7..2f9ccbe6a639 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -38,7 +38,7 @@ extern struct module __this_module;
#ifdef CONFIG_MODULES
-#ifndef __GENKSYMS__
+#if defined(__KERNEL__) && !defined(__GENKSYMS__)
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate checksums for some symbols */
@@ -53,7 +53,7 @@ extern struct module __this_module;
#endif
/* For every exported symbol, place a struct in the __ksymtab section */
-#define __EXPORT_SYMBOL(sym, sec) \
+#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
@@ -65,6 +65,35 @@ extern struct module __this_module;
__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
= { (unsigned long)&sym, __kstrtab_##sym }
+#if defined(__KSYM_DEPS__)
+
+/*
+ * For fine grained build dependencies, we want to tell the build system
+ * about each possible exported symbol even if they're not actually exported.
+ * We use a string pattern that is unlikely to be valid code that the build
+ * system filters out from the preprocessor output (see ksym_dep_filter
+ * in scripts/Kbuild.include).
+ */
+#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, sec) \
+ __cond_export_sym(sym, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, sec, conf) \
+ ___cond_export_sym(sym, sec, conf)
+#define ___cond_export_sym(sym, sec, enabled) \
+ __cond_export_sym_##enabled(sym, sec)
+#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
+#define __cond_export_sym_0(sym, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
+#endif
+
#define EXPORT_SYMBOL(sym) \
__EXPORT_SYMBOL(sym, "")
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index b90e9bdbd1dd..4c02c6521fef 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -508,4 +508,6 @@ enum {
F2FS_FT_MAX
};
+#define S_SHIFT 12
+
#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dfe88351341f..a964d076b4dc 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -673,6 +673,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
/* drivers/video/fb_defio.c */
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern void fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698b60b8..2056e9fd0138 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,6 +49,8 @@ struct fence_cb;
* @timestamp: Timestamp when the fence was signaled.
* @status: Optional, only valid if < 0, must be set before calling
* fence_signal, indicates that the fence has completed with an error.
+ * @child_list: list of children fences
+ * @active_list: list of active fences
*
* the flags member must be manipulated and read using the appropriate
* atomic ops (bit_*), so taking the spinlock will not be needed most
diff --git a/include/linux/file.h b/include/linux/file.h
index f87d30882a24..7444f5feda12 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -44,6 +44,7 @@ extern struct file *fget_raw(unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
+extern void __f_unlock_pos(struct file *);
static inline struct fd __to_fd(unsigned long v)
{
@@ -60,6 +61,18 @@ static inline struct fd fdget_raw(unsigned int fd)
return __to_fd(__fdget_raw(fd));
}
+static inline struct fd fdget_pos(int fd)
+{
+ return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+ if (f.flags & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(f.file);
+ fdput(f);
+}
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a51a5361695f..6fc31ef1da2d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -13,6 +13,8 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/capability.h>
+
#include <net/sch_generic.h>
#include <asm/cacheflush.h>
@@ -42,6 +44,15 @@ struct bpf_prog_aux;
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_8
+/* Kernel hidden auxiliary/helper register for hardening step.
+ * Only used by eBPF JITs. It's nothing more than a temporary
+ * register that JITs use internally; the difference here is that it
+ * appears in eBPF instructions that have been rewritten for blinding
+ * constants. See the JIT pre-step in bpf_jit_blind_constants().
+ */
+#define BPF_REG_AX MAX_BPF_REG
+#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
@@ -352,6 +363,22 @@ struct sk_filter {
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
+struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
+ void *data_end;
+};
+
+/* compute the linear packet data range [data, data_end) which
+ * will be accessed by cls_bpf and act_bpf programs
+ */
+static inline void bpf_compute_data_end(struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -442,7 +469,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -465,14 +492,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
-int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
- bool locked);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
-int __sk_detach_filter(struct sock *sk, bool locked);
-
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -480,10 +503,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ const struct bpf_insn *patch, u32 len);
+
#ifdef CONFIG_BPF_JIT
+extern int bpf_jit_enable;
+extern int bpf_jit_harden;
+
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
struct bpf_binary_header *
@@ -495,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
@@ -505,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
}
+
+static inline bool bpf_jit_is_ebpf(void)
+{
+# ifdef CONFIG_HAVE_EBPF_JIT
+ return true;
+# else
+ return false;
+# endif
+}
+
+static inline bool bpf_jit_blinding_enabled(void)
+{
+ /* These are the prerequisites; should someone ever have the
+ * idea to call blinding outside of them, we make sure to
+ * bail out.
+ */
+ if (!bpf_jit_is_ebpf())
+ return false;
+ if (!bpf_jit_enable)
+ return false;
+ if (!bpf_jit_harden)
+ return false;
+ if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ return false;
+
+ return true;
+}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
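
Note the changed contract of bpf_prog_select_runtime(): because the JIT may now produce a rewritten (e.g. constant-blinded) clone of the program, the function returns a possibly different bpf_prog pointer plus an error code, and callers must continue with the returned pointer. A minimal caller-side sketch, with caller-specific cleanup elided, might be:

static struct bpf_prog *my_finalize(struct bpf_prog *fp)
{
	int err;

	/* old API: err = bpf_prog_select_runtime(fp); */
	fp = bpf_prog_select_runtime(fp, &err);
	if (err) {
		/* caller-specific release of fp goes here */
		return ERR_PTR(err);
	}
	/* from here on, only the returned fp may be used */
	return fp;
}
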
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 70e61b58baaf..dd288148a6b1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,7 +74,6 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
-typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
@@ -323,6 +322,8 @@ struct writeback_control;
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
#define IOCB_HIPRI (1 << 3)
+#define IOCB_DSYNC (1 << 4)
+#define IOCB_SYNC (1 << 5)
struct kiocb {
struct file *ki_filp;
@@ -394,7 +395,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -577,6 +578,18 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+static inline struct posix_acl *
+uncached_acl_sentinel(struct task_struct *task)
+{
+ return (void *)task + 1;
+}
+
+static inline bool
+is_uncached_acl(struct posix_acl *acl)
+{
+ return (long)acl & 1;
+}
+
#define IOP_FASTPERM 0x0001
#define IOP_LOOKUP 0x0002
#define IOP_NOFOLLOW 0x0004
@@ -635,7 +648,7 @@ struct inode {
/* Misc */
unsigned long i_state;
- struct mutex i_mutex;
+ struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
unsigned long dirtied_time_when;
@@ -672,6 +685,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
+ unsigned i_dir_seq;
};
__u32 i_generation;
@@ -721,27 +735,42 @@ enum inode_i_mutex_lock_class
static inline void inode_lock(struct inode *inode)
{
- mutex_lock(&inode->i_mutex);
+ down_write(&inode->i_rwsem);
}
static inline void inode_unlock(struct inode *inode)
{
- mutex_unlock(&inode->i_mutex);
+ up_write(&inode->i_rwsem);
+}
+
+static inline void inode_lock_shared(struct inode *inode)
+{
+ down_read(&inode->i_rwsem);
+}
+
+static inline void inode_unlock_shared(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
}
static inline int inode_trylock(struct inode *inode)
{
- return mutex_trylock(&inode->i_mutex);
+ return down_write_trylock(&inode->i_rwsem);
+}
+
+static inline int inode_trylock_shared(struct inode *inode)
+{
+ return down_read_trylock(&inode->i_rwsem);
}
static inline int inode_is_locked(struct inode *inode)
{
- return mutex_is_locked(&inode->i_mutex);
+ return rwsem_is_locked(&inode->i_rwsem);
}
static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
{
- mutex_lock_nested(&inode->i_mutex, subclass);
+ down_write_nested(&inode->i_rwsem, subclass);
}
void lock_two_nondirectories(struct inode *, struct inode*);
@@ -1646,6 +1675,7 @@ struct file_operations {
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
+ int (*iterate_shared) (struct file *, struct dir_context *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1699,8 +1729,10 @@ struct inode_operations {
struct inode *, struct dentry *, unsigned int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
- int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
- ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ int (*setxattr) (struct dentry *, struct inode *,
+ const char *, const void *, size_t, int);
+ ssize_t (*getxattr) (struct dentry *, struct inode *,
+ const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
@@ -2263,7 +2295,7 @@ struct filename {
const char iname[];
};
-extern long vfs_truncate(struct path *, loff_t);
+extern long vfs_truncate(const struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
@@ -2320,14 +2352,6 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-#ifdef CONFIG_FS_DAX
-extern bool blkdev_dax_capable(struct block_device *bdev);
-#else
-static inline bool blkdev_dax_capable(struct block_device *bdev)
-{
- return false;
-}
-#endif
extern struct super_block *blockdev_superblock;
@@ -2395,6 +2419,8 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
/* fs/char_dev.c */
#define CHRDEV_MAJOR_HASH_SIZE 255
+/* Marks the bottom of the first segment of free char majors */
+#define CHRDEV_MAJOR_DYN_END 234
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
@@ -2485,13 +2511,25 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
-static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
- if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
- return 0;
- return vfs_fsync_range(file, pos, pos + count - 1,
- (file->f_flags & __O_SYNC) ? 0 : 1);
+
+/*
+ * Sync the bytes written if this was a synchronous write. ki_pos is
+ * expected to already be updated for the write. Returns either the number
+ * of bytes passed in, or an error if syncing the file failed.
+ */
+static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
+{
+ if (iocb->ki_flags & IOCB_DSYNC) {
+ int ret = vfs_fsync_range(iocb->ki_filp,
+ iocb->ki_pos - count, iocb->ki_pos - 1,
+ (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
+ if (ret)
+ return ret;
+ }
+
+ return count;
}
+
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
@@ -2590,15 +2628,34 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
enum kernel_read_file_id {
- READING_FIRMWARE = 1,
- READING_MODULE,
- READING_KEXEC_IMAGE,
- READING_KEXEC_INITRAMFS,
- READING_POLICY,
- READING_MAX_ID
+ __kernel_read_file_id(__fid_enumify)
};
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if (id < 0 || id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
+
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
@@ -2703,7 +2760,7 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2766,18 +2823,17 @@ void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
- loff_t offset, get_block_t get_block,
+ get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
struct inode *inode,
- struct iov_iter *iter, loff_t offset,
+ struct iov_iter *iter,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- offset, get_block, NULL, NULL,
- DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
@@ -2943,6 +2999,10 @@ static inline int iocb_flags(struct file *file)
res |= IOCB_APPEND;
if (io_is_direct(file))
res |= IOCB_DIRECT;
+ if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ res |= IOCB_DSYNC;
+ if (file->f_flags & __O_SYNC)
+ res |= IOCB_SYNC;
return res;
}
@@ -3104,6 +3164,13 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
+static inline bool dir_relax_shared(struct inode *inode)
+{
+ inode_unlock_shared(inode);
+ inode_lock_shared(inode);
+ return !IS_DEADDIR(inode);
+}
+
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
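
Putting the fs.h changes together: iocb_flags() now latches O_DSYNC/O_SYNC into IOCB_DSYNC/IOCB_SYNC when the kiocb is built, and generic_write_sync() consumes those flags instead of re-reading f_flags. A ->write_iter() implementation therefore typically ends like the sketch below, which mirrors the generic path and uses the new i_rwsem-based locking helpers.

static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);			/* down_write(&inode->i_rwsem) */
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* no-op without IOCB_DSYNC */
	return ret;
}
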
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e1526cd00..13ba552e6c09 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
/* check the consistency between the backing cache and the FS-Cache
* cookie */
- bool (*check_consistency)(struct fscache_operation *op);
+ int (*check_consistency)(struct fscache_operation *op);
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index 6027f6bbb061..cfa6cde25f8e 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -175,6 +175,7 @@ struct fscrypt_name {
*/
struct fscrypt_operations {
int (*get_context)(struct inode *, void *, size_t);
+ int (*key_prefix)(struct inode *, u8 **);
int (*prepare_context)(struct inode *);
int (*set_context)(struct inode *, const void *, size_t, void *);
int (*dummy_context)(struct inode *);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 0023088b253b..3f9778cbc79d 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -39,6 +39,10 @@
#define FSL_IFC_VERSION_MASK 0x0F0F0000
#define FSL_IFC_VERSION_1_0_0 0x01000000
#define FSL_IFC_VERSION_1_1_0 0x01010000
+#define FSL_IFC_VERSION_2_0_0 0x02000000
+
+#define PGOFFSET_64K (64*1024)
+#define PGOFFSET_4K (4*1024)
/*
* CSPR - Chip Select Property Register
@@ -723,20 +727,26 @@ struct fsl_ifc_nand {
__be32 nand_evter_en;
u32 res17[0x2];
__be32 nand_evter_intr_en;
- u32 res18[0x2];
+ __be32 nand_vol_addr_stat;
+ u32 res18;
__be32 nand_erattr0;
__be32 nand_erattr1;
u32 res19[0x10];
__be32 nand_fsr;
- u32 res20;
- __be32 nand_eccstat[4];
- u32 res21[0x20];
+ u32 res20[0x3];
+ __be32 nand_eccstat[6];
+ u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
__be32 nand_autoboot_trgr;
u32 res23;
__be32 nand_mdr;
- u32 res24[0x5C];
+ u32 res24[0x1C];
+ __be32 nand_dll_lowcfg0;
+ __be32 nand_dll_lowcfg1;
+ u32 res25;
+ __be32 nand_dll_lowstat;
+ u32 res26[0x3c];
};
/*
@@ -771,13 +781,12 @@ struct fsl_ifc_gpcm {
__be32 gpcm_erattr1;
__be32 gpcm_erattr2;
__be32 gpcm_stat;
- u32 res4[0x1F3];
};
/*
* IFC Controller Registers
*/
-struct fsl_ifc_regs {
+struct fsl_ifc_global {
__be32 ifc_rev;
u32 res1[0x2];
struct {
@@ -803,21 +812,26 @@ struct fsl_ifc_regs {
} ftim_cs[FSL_IFC_BANK_COUNT];
u32 res9[0x30];
__be32 rb_stat;
- u32 res10[0x2];
+ __be32 rb_map;
+ __be32 wb_map;
__be32 ifc_gcr;
- u32 res11[0x2];
+ u32 res10[0x2];
__be32 cm_evter_stat;
- u32 res12[0x2];
+ u32 res11[0x2];
__be32 cm_evter_en;
- u32 res13[0x2];
+ u32 res12[0x2];
__be32 cm_evter_intr_en;
- u32 res14[0x2];
+ u32 res13[0x2];
__be32 cm_erattr0;
__be32 cm_erattr1;
- u32 res15[0x2];
+ u32 res14[0x2];
__be32 ifc_ccr;
__be32 ifc_csr;
- u32 res16[0x2EB];
+ __be32 ddr_ccr_low;
+};
+
+
+struct fsl_ifc_runtime {
struct fsl_ifc_nand ifc_nand;
struct fsl_ifc_nor ifc_nor;
struct fsl_ifc_gpcm ifc_gpcm;
@@ -831,7 +845,8 @@ extern int fsl_ifc_find(phys_addr_t addr_base);
struct fsl_ifc_ctrl {
/* device info */
struct device *dev;
- struct fsl_ifc_regs __iomem *regs;
+ struct fsl_ifc_global __iomem *gregs;
+ struct fsl_ifc_runtime __iomem *rregs;
int irq;
int nand_irq;
spinlock_t lock;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1259e53d9296..29f917517299 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -359,8 +359,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/
extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
-/* run all the marks in a group, and flag them to be freed */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dea12a6e413b..66a36a815f0a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5c706765404a..359a8e4bd44d 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
+#include <linux/uuid.h>
#ifdef CONFIG_BLOCK
@@ -93,7 +94,7 @@ struct disk_stats {
* Enough for the string representation of any kind of UUID plus NULL.
* EFI UUID is 36 characters. MSDOS UUID is 11 characters.
*/
-#define PARTITION_META_INFO_UUIDLTH 37
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
struct partition_meta_info {
char uuid[PARTITION_META_INFO_UUIDLTH];
@@ -228,27 +229,9 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
return NULL;
}
-static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
- int i;
- for (i = 0; i < 16; ++i) {
- *to++ = (hex_to_bin(*uuid_str) << 4) |
- (hex_to_bin(*(uuid_str + 1)));
- uuid_str += 2;
- switch (i) {
- case 3:
- case 5:
- case 7:
- case 9:
- uuid_str++;
- continue;
- }
- }
-}
-
static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
{
- part_pack_uuid(uuid_str, to);
+ uuid_be_to_bin(uuid_str, (uuid_be *)to);
return 0;
}
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eecd19b37001..6270a56e5edc 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/* MAGIC helpers {{{2 */
+static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
+{
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0);
+}
+
/* possible field types */
#define __flg_field(attr_nr, attr_flag, name) \
__field(attr_nr, attr_flag, name, NLA_U8, char, \
@@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
nla_get_u32, nla_put_u32, true)
#define __u64_field(attr_nr, attr_flag, name) \
__field(attr_nr, attr_flag, name, NLA_U64, __u64, \
- nla_get_u64, nla_put_u64, false)
+ nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
nla_strlcpy, nla_put, false)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index bee976f82788..50882e09289b 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,18 @@ struct gpio_device;
#ifdef CONFIG_GPIOLIB
/**
+ * enum single_ended_mode - mode for single ended operation
+ * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
+ * @LINE_MODE_OPEN_DRAIN: set line to be open drain
+ * @LINE_MODE_OPEN_SOURCE: set line to be open source
+ */
+enum single_ended_mode {
+ LINE_MODE_PUSH_PULL,
+ LINE_MODE_OPEN_DRAIN,
+ LINE_MODE_OPEN_SOURCE,
+};
+
+/**
* struct gpio_chip - abstract a GPIO controller
* @label: a functional name for the GPIO device, such as a part
* number or the name of the SoC IP-block implementing it.
@@ -38,7 +50,15 @@ struct gpio_device;
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_debounce: optional hook for setting debounce time for specified gpio in
- * interrupt triggered gpio chips
+ * interrupt triggered gpio chips
+ * @set_single_ended: optional hook for setting a line as open drain, open
+ * source, or non-single ended (restore from open drain/source to normal
+ * push-pull mode). This should be implemented if the hardware supports
+ * open drain or open source settings. The gpiolib core will otherwise try
+ * to emulate open drain/source by not actively driving lines high/low
+ * if a consumer requests this. The driver may return -ENOTSUPP if e.g.
+ * it supports just open drain but not open source and is called
+ * with LINE_MODE_OPEN_SOURCE as the mode argument.
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
@@ -130,6 +150,9 @@ struct gpio_chip {
int (*set_debounce)(struct gpio_chip *chip,
unsigned offset,
unsigned debounce);
+ int (*set_single_ended)(struct gpio_chip *chip,
+ unsigned offset,
+ enum single_ended_mode mode);
int (*to_irq)(struct gpio_chip *chip,
unsigned offset);
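
A controller that can switch a pad between push-pull and open drain might implement the new hook roughly as below; the driver structure, the my_gpio_write_pad() helper and the MY_PAD_* constants are invented for illustration.

static int my_gpio_set_single_ended(struct gpio_chip *chip, unsigned offset,
				    enum single_ended_mode mode)
{
	struct my_gpio *priv = gpiochip_get_data(chip);

	switch (mode) {
	case LINE_MODE_PUSH_PULL:
		return my_gpio_write_pad(priv, offset, MY_PAD_PUSH_PULL);
	case LINE_MODE_OPEN_DRAIN:
		return my_gpio_write_pad(priv, offset, MY_PAD_OPEN_DRAIN);
	case LINE_MODE_OPEN_SOURCE:
		return -ENOTSUPP;	/* this hardware cannot drive open source */
	}
	return -EINVAL;
}

/* and in chip setup: chip->set_single_ended = my_gpio_set_single_ended; */
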
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index dfd59d6bc6f0..c683996110b1 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -61,6 +61,7 @@ extern void irq_exit(void);
#define nmi_enter() \
do { \
+ printk_nmi_enter(); \
lockdep_off(); \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
@@ -77,6 +78,7 @@ extern void irq_exit(void);
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
lockdep_on(); \
+ printk_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47e1528..ad6fa21d977b 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -3,76 +3,94 @@
/* Fast hashing routine for ints, longs and pointers.
(C) 2002 Nadia Yvette Chambers, IBM */
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-
#include <asm/types.h>
#include <linux/compiler.h>
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
-
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
#if BITS_PER_LONG == 32
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif
-static __always_inline u64 hash_64(u64 val, unsigned int bits)
-{
- u64 hash = val;
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits. Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties. (See Knuth vol 3, section 6.4, exercise 9.)
+ *
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_PRIME_64;
-#else
- /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
- u64 n = hash;
- n <<= 18;
- hash -= n;
- n <<= 33;
- hash -= n;
- n <<= 3;
- hash += n;
- n <<= 3;
- hash -= n;
- n <<= 4;
- hash += n;
- n <<= 2;
- hash += n;
+#ifdef CONFIG_HAVE_ARCH_HASH
+/* This header may use the GOLDEN_RATIO_xx constants */
+#include <asm/hash.h>
#endif
- /* High bits are more random, so use them. */
- return hash >> (64 - bits);
+/*
+ * The _generic versions exist only so lib/test_hash.c can compare
+ * the arch-optimized versions with the generic.
+ *
+ * Note that if you change these, any <asm/hash.h> that aren't updated
+ * to match need to have their HAVE_ARCH_* define values updated so the
+ * self-test will not false-positive.
+ */
+#ifndef HAVE_ARCH__HASH_32
+#define __hash_32 __hash_32_generic
+#endif
+static inline u32 __hash_32_generic(u32 val)
+{
+ return val * GOLDEN_RATIO_32;
}
-static inline u32 hash_32(u32 val, unsigned int bits)
+#ifndef HAVE_ARCH_HASH_32
+#define hash_32 hash_32_generic
+#endif
+static inline u32 hash_32_generic(u32 val, unsigned int bits)
{
- /* On some cpus multiply is faster, on others gcc will do shifts */
- u32 hash = val * GOLDEN_RATIO_PRIME_32;
-
/* High bits are more random, so use them. */
- return hash >> (32 - bits);
+ return __hash_32(val) >> (32 - bits);
+}
+
+#ifndef HAVE_ARCH_HASH_64
+#define hash_64 hash_64_generic
+#endif
+static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
}
-static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
}
+/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
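
The calling convention of hash_32()/hash_64() is unchanged, so existing users keep working; for reference, bucketing a 64-bit key into a 1024-entry table still reads as follows (table name and size are arbitrary).

#include <linux/hash.h>
#include <linux/list.h>

#define MY_HASH_BITS	10
static struct hlist_head my_table[1 << MY_HASH_BITS];

static struct hlist_head *my_bucket(u64 key)
{
	/* hash_64() now multiplies by GOLDEN_RATIO_64 and keeps the top bits */
	return &my_table[hash_64(key, MY_HASH_BITS)];
}
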
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..419fb9e03447 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -28,9 +28,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma,
- struct vm_area_struct *new_vma,
- unsigned long old_addr,
+extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -152,6 +150,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
}
struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +207,10 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline void put_huge_zero_page(void)
+{
+ BUILD_BUG();
+}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7d953c2542a8..c26d4638f665 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
+void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);
@@ -352,9 +353,7 @@ extern unsigned int default_hstate_idx;
static inline struct hstate *hstate_inode(struct inode *i)
{
- struct hugetlbfs_sb_info *hsb;
- hsb = HUGETLBFS_SB(i->i_sb);
- return hsb->hstate;
+ return HUGETLBFS_SB(i->i_sb)->hstate;
}
static inline struct hstate *hstate_file(struct file *f)
@@ -453,12 +452,12 @@ static inline pgoff_t basepage_index(struct page *page)
extern void dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
-static inline int hugepage_migration_supported(struct hstate *h)
+static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
return huge_page_shift(h) == PMD_SHIFT;
#else
- return 0;
+ return false;
#endif
}
@@ -520,7 +519,7 @@ static inline pgoff_t basepage_index(struct page *page)
return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
-#define hugepage_migration_supported(h) 0
+#define hugepage_migration_supported(h) false
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 24154c26d469..063962f6dfc6 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -93,20 +93,17 @@ hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page)
{
- return;
}
static inline void
hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
{
- return;
}
static inline void
hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg)
{
- return;
}
static inline void hugetlb_cgroup_file_init(void)
@@ -116,7 +113,6 @@ static inline void hugetlb_cgroup_file_init(void)
static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
struct page *newhpage)
{
- return;
}
#endif /* CONFIG_MEM_RES_CTLR_HUGETLB */
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 2bb681fbeb35..a4e7ca0f3585 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -5,16 +5,16 @@
#include <linux/mm.h>
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return !!(vma->vm_flags & VM_HUGETLB);
}
#else
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
- return 0;
+ return false;
}
#endif
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index aa0fadce9308..b10954a66939 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -126,6 +126,8 @@ struct hv_ring_buffer_info {
u32 ring_datasize; /* < ring_size */
u32 ring_data_startoffset;
+ u32 priv_write_index;
+ u32 priv_read_index;
};
/*
@@ -151,6 +153,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
*read = dsize - *write;
}
+static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 read_loc, write_loc, dsize, read;
+
+ dsize = rbi->ring_datasize;
+ read_loc = rbi->ring_buffer->read_index;
+ write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+
+ read = write_loc >= read_loc ? (write_loc - read_loc) :
+ (dsize - read_loc) + write_loc;
+
+ return read;
+}
+
+static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
+{
+ u32 read_loc, write_loc, dsize, write;
+
+ dsize = rbi->ring_datasize;
+ read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+ write_loc = rbi->ring_buffer->write_index;
+
+ write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ return write;
+}
+
/*
 * VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number and minor_number.
@@ -1091,7 +1120,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t min, resource_size_t max,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok);
-
+void vmbus_free_mmio(resource_size_t start, resource_size_t size);
int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);
@@ -1338,4 +1367,143 @@ extern __u32 vmbus_proto_version;
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id);
+void vmbus_set_event(struct vmbus_channel *channel);
+
+/* Get the start of the ring buffer. */
+static inline void *
+hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+{
+ return (void *)ring_info->ring_buffer->buffer;
+}
+
+/*
+ * To optimize the flow management on the send-side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, the consumer of
+ * the ring buffer can potentially signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ * producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate if
+ * the consumer of the ring will signal when the ring
+ * state transitions from being full to a state where
+ * there is room for the producer to send the pending packet.
+ */
+
+static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 cur_write_sz;
+ u32 pending_sz;
+
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * Here is the reason for having this barrier:
+ * If the reading of the pend_sz (in this function)
+ * were to be reordered and read before we commit the new read
+ * index (in the calling function) we could
+ * have a problem. If the host were to set the pending_sz after we
+ * have sampled pending_sz and go to sleep before we commit the
+ * read index, we could miss sending the interrupt. Issue a full
+ * memory barrier to address this.
+ */
+ virt_mb();
+
+ pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+ /* If the other end is not blocked on write don't bother. */
+ if (pending_sz == 0)
+ return false;
+
+ cur_write_sz = hv_get_bytes_to_write(rbi);
+
+ if (cur_write_sz >= pending_sz)
+ return true;
+
+ return false;
+}
+
+/*
+ * An API to support in-place processing of incoming VMBUS packets.
+ */
+#define VMBUS_PKT_TRAILER 8
+
+static inline struct vmpacket_descriptor *
+get_next_pkt_raw(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ u32 read_loc = ring_info->priv_read_index;
+ void *ring_buffer = hv_get_ring_buffer(ring_info);
+ struct vmpacket_descriptor *cur_desc;
+ u32 packetlen;
+ u32 dsize = ring_info->ring_datasize;
+ u32 delta = read_loc - ring_info->ring_buffer->read_index;
+ u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
+
+ if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
+ return NULL;
+
+ if ((read_loc + sizeof(*cur_desc)) > dsize)
+ return NULL;
+
+ cur_desc = ring_buffer + read_loc;
+ packetlen = cur_desc->len8 << 3;
+
+ /*
+ * If the packet under consideration is wrapping around,
+ * return failure.
+ */
+ if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
+ return NULL;
+
+ return cur_desc;
+}
+
+/*
+ * A helper function to step through packets "in-place".
+ * This API is to be called after each successful call to
+ * get_next_pkt_raw().
+ */
+static inline void put_pkt_raw(struct vmbus_channel *channel,
+ struct vmpacket_descriptor *desc)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ u32 read_loc = ring_info->priv_read_index;
+ u32 packetlen = desc->len8 << 3;
+ u32 dsize = ring_info->ring_datasize;
+
+ if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
+ BUG();
+ /*
+ * Include the packet trailer.
+ */
+ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+}
+
+/*
+ * This call commits the read index and potentially signals the host.
+ * Here is the pattern for using the "in-place" consumption APIs:
+ *
+ * while (get_next_pkt_raw()) {
+ * process the packet "in-place";
+ * put_pkt_raw();
+ * }
+ * if (packets processed in place)
+ * commit_rd_index();
+ */
+static inline void commit_rd_index(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ /*
+ * Make sure all reads are done before we update the read index since
+ * the writer may start writing to the read area once the read index
+ * is updated.
+ */
+ virt_rmb();
+ ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+
+ if (hv_need_to_signal_on_read(ring_info))
+ vmbus_set_event(channel);
+}
+
+
#endif /* _HYPERV_H */
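The block comment above commit_rd_index() spells out the intended calling pattern for the in-place consumption helpers. A minimal consumer loop following that pattern is sketched below; process_packet() and drain_channel_in_place() are hypothetical names, and a real VMBus driver would do protocol-specific parsing rather than an empty handler.

#include <linux/hyperv.h>

/* Hypothetical handler; a real driver parses the payload per its protocol. */
static void process_packet(struct vmpacket_descriptor *desc)
{
}

static void drain_channel_in_place(struct vmbus_channel *channel)
{
        struct vmpacket_descriptor *desc;
        bool processed = false;

        while ((desc = get_next_pkt_raw(channel)) != NULL) {
                process_packet(desc);           /* consume the data in place */
                put_pkt_raw(channel, desc);     /* advance the private read index */
                processed = true;
        }

        /* Publish the new read index and signal the host if it is waiting. */
        if (processed)
                commit_rd_index(channel);
}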
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index b5f9a007a3ab..d4c1d12f900d 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -27,22 +27,49 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
+
+struct i2c_mux_core {
+ struct i2c_adapter *parent;
+ struct device *dev;
+ bool mux_locked;
+
+ void *priv;
+
+ int (*select)(struct i2c_mux_core *, u32 chan_id);
+ int (*deselect)(struct i2c_mux_core *, u32 chan_id);
+
+ int num_adapters;
+ int max_adapters;
+ struct i2c_adapter *adapter[0];
+};
+
+struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+ struct device *dev, int max_adapters,
+ int sizeof_priv, u32 flags,
+ int (*select)(struct i2c_mux_core *, u32),
+ int (*deselect)(struct i2c_mux_core *, u32));
+
+/* flags for i2c_mux_alloc */
+#define I2C_MUX_LOCKED BIT(0)
+
+static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
+{
+ return muxc->priv;
+}
+
+struct i2c_adapter *i2c_root_adapter(struct device *dev);
+
/*
- * Called to create a i2c bus on a multiplexed bus segment.
- * The mux_dev and chan_id parameters are passed to the select
- * and deselect callback functions to perform hardware-specific
- * mux control.
+ * Called to create an i2c bus on a multiplexed bus segment.
+ * The chan_id parameter is passed to the select and deselect
+ * callback functions to perform hardware-specific mux control.
*/
-struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- struct device *mux_dev,
- void *mux_priv, u32 force_nr, u32 chan_id,
- unsigned int class,
- int (*select) (struct i2c_adapter *,
- void *mux_dev, u32 chan_id),
- int (*deselect) (struct i2c_adapter *,
- void *mux_dev, u32 chan_id));
-
-void i2c_del_mux_adapter(struct i2c_adapter *adap);
+int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+ u32 force_nr, u32 chan_id,
+ unsigned int class);
+
+void i2c_mux_del_adapters(struct i2c_mux_core *muxc);
#endif /* __KERNEL__ */
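Under the new core-based API a mux driver allocates an i2c_mux_core carrying its select/deselect callbacks, then registers one child adapter per channel. A rough sketch under those assumptions follows; the mymux private struct, the two-channel layout, and the empty select body are illustrative only, and the error handling is simplified.

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

struct mymux {                  /* hypothetical per-mux private state */
        u8 last_chan;
};

static int mymux_select(struct i2c_mux_core *muxc, u32 chan)
{
        struct mymux *priv = i2c_mux_priv(muxc);

        priv->last_chan = chan;
        /* A real driver programs the mux hardware here. */
        return 0;
}

static int mymux_register(struct i2c_adapter *parent, struct device *dev)
{
        struct i2c_mux_core *muxc;
        int i, ret;

        muxc = i2c_mux_alloc(parent, dev, 2, sizeof(struct mymux),
                             0, mymux_select, NULL);
        if (!muxc)
                return -ENOMEM;

        for (i = 0; i < 2; i++) {
                ret = i2c_mux_add_adapter(muxc, 0, i, 0);
                if (ret) {
                        i2c_mux_del_adapters(muxc);
                        return ret;
                }
        }
        return 0;
}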
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 200cf13b00f6..96a25ae14494 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -524,6 +524,7 @@ struct i2c_adapter {
/* data fields that are valid for all devices */
struct rt_mutex bus_lock;
+ struct rt_mutex mux_lock;
int timeout; /* in jiffies */
int retries;
@@ -538,6 +539,10 @@ struct i2c_adapter {
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
+
+ void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -567,8 +572,44 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
/* Adapter locking functions, exported for shared pin cases */
-void i2c_lock_adapter(struct i2c_adapter *);
-void i2c_unlock_adapter(struct i2c_adapter *);
+#define I2C_LOCK_ROOT_ADAPTER BIT(0)
+#define I2C_LOCK_SEGMENT BIT(1)
+
+/**
+ * i2c_lock_bus - Get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT
+ * locks only this branch in the adapter tree
+ */
+static inline void
+i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ adapter->lock_bus(adapter, flags);
+}
+
+/**
+ * i2c_unlock_bus - Release exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT
+ * unlocks only this branch in the adapter tree
+ */
+static inline void
+i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ adapter->unlock_bus(adapter, flags);
+}
+
+static inline void
+i2c_lock_adapter(struct i2c_adapter *adapter)
+{
+ i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
+
+static inline void
+i2c_unlock_adapter(struct i2c_adapter *adapter)
+{
+ i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
@@ -654,6 +695,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
return adap->nr;
}
+static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
+{
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+}
+
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
* @__i2c_driver: i2c_driver struct
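i2c_8bit_addr_from_msg() builds the on-the-wire address byte (7-bit address shifted left, R/W bit in bit 0), and the new lock_bus hooks let a caller choose between locking the whole root adapter and only one segment. A small sketch follows; the lock scope and the use of __i2c_transfer() for the locked sequence are illustrative, not a prescription.

#include <linux/i2c.h>

/* Build the first byte put on the wire for this message. */
static u8 first_wire_byte(const struct i2c_msg *msg)
{
        return i2c_8bit_addr_from_msg(msg);     /* (addr << 1) | R/W bit */
}

/* Keep a multi-message sequence atomic with respect to this bus segment. */
static int do_locked_sequence(struct i2c_adapter *adap,
                              struct i2c_msg *msgs, int num)
{
        int ret;

        i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
        ret = __i2c_transfer(adap, msgs, num);
        i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);

        return ret;
}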
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
deleted file mode 100644
index 52baa79d69a7..000000000000
--- a/include/linux/i2c/sx150x.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Driver for the Semtech SX150x I2C GPIO Expanders
- *
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#ifndef __LINUX_I2C_SX150X_H
-#define __LINUX_I2C_SX150X_H
-
-/**
- * struct sx150x_platform_data - config data for SX150x driver
- * @gpio_base: The index number of the first GPIO assigned to this
- * GPIO expander. The expander will create a block of
- * consecutively numbered gpios beginning at the given base,
- * with the size of the block depending on the model of the
- * expander chip.
- * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
- * instead of as an oscillator, increasing the size of the
- * GP(I)O pool created by this expander by one. The
- * output-only GPO pin will be added at the end of the block.
- * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
- * for each IO line in the expander. Setting the bit at
- * position n will enable the pull-up for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down
- * resistor for each IO line in the expander. Setting the
- * bit at position n will enable the pull-down for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_open_drain_ena: A bit-mask which enables-or disables open-drain
- * operation for each IO line in the expander. Setting the
- * bit at position n enables open-drain operation for
- * the IO at the corresponding offset. Clearing the bit
- * enables regular push-pull operation for that IO.
- * For chips with fewer than 16 IO pins, high-end bits
- * are ignored.
- * @io_polarity: A bit-mask which enables polarity inversion for each IO line
- * in the expander. Setting the bit at position n inverts
- * the polarity of that IO line, while clearing it results
- * in normal polarity. For chips with fewer than 16 IO pins,
- * high-end bits are ignored.
- * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
- * is connected, via which it reports interrupt events
- * across all GPIO lines. This must be a real,
- * pre-existing IRQ line.
- * Setting this value < 0 disables the irq_chip functionality
- * of the driver.
- * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
- * IRQ lines will appear. Similarly to gpio_base, the expander
- * will create a block of irqs beginning at this number.
- * This value is ignored if irq_summary is < 0.
- * @reset_during_probe: If set to true, the driver will trigger a full
- * reset of the chip at the beginning of the probe
- * in order to place it in a known state.
- */
-struct sx150x_platform_data {
- unsigned gpio_base;
- bool oscio_is_gpo;
- u16 io_pullup_ena;
- u16 io_pulldn_ena;
- u16 io_open_drain_ena;
- u16 io_polarity;
- int irq_summary;
- unsigned irq_base;
- bool reset_during_probe;
-};
-
-#endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3b1f6cef9513..b118744d3382 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -7,6 +7,7 @@
* Copyright (c) 2005, Devicescape Software, Inc.
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -163,6 +164,9 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
+
/* Maximal size of an A-MSDU */
#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
@@ -637,6 +641,16 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
}
+/**
+ * ieee80211_is_frag - check if a frame is a fragment
+ * @hdr: 802.11 header of the frame
+ */
+static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
+{
+ return ieee80211_has_morefrags(hdr->frame_control) ||
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+}
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -1011,6 +1025,16 @@ struct ieee80211_mgmt {
u8 tpc_elem_length;
struct ieee80211_tpc_report_ie tpc;
} __packed tpc_report;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u8 tod[6];
+ u8 toa[6];
+ __le16 tod_error;
+ __le16 toa_error;
+ u8 variable[0];
+ } __packed ftm;
} u;
} __packed action;
} u;
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index d3e415674dac..acedbb68a5a3 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -47,6 +47,7 @@
#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
#define IEEE802154_EXTENDED_ADDR_LEN 8
+#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
@@ -218,6 +219,7 @@ enum {
/* frame control handling */
#define IEEE802154_FCTL_FTYPE 0x0003
#define IEEE802154_FCTL_ACKREQ 0x0020
+#define IEEE802154_FCTL_SECEN 0x0004
#define IEEE802154_FCTL_INTRA_PAN 0x0040
#define IEEE802154_FTYPE_DATA 0x0001
@@ -233,6 +235,15 @@ static inline int ieee802154_is_data(__le16 fc)
}
/**
+ * ieee802154_is_secen - check if Security bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_secen(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_SECEN);
+}
+
+/**
* ieee802154_is_ackreq - check if acknowledgment request bit is set
* @fc: frame control bytes in little-endian byteorder
*/
@@ -260,17 +271,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc)
*
* @len: psdu len with (MHR + payload + MFR)
*/
-static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+static inline bool ieee802154_is_valid_psdu_len(u8 len)
{
return (len == IEEE802154_ACK_PSDU_LEN ||
(len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
- * ieee802154_is_valid_psdu_len - check if extended addr is valid
+ * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
* @addr: extended addr to check
*/
-static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr)
{
/* Bail out if the address is all zero, or if the group
* address bit is set.
@@ -280,6 +291,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
}
/**
+ * ieee802154_is_broadcast_short_addr - check if short addr is broadcast
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_broadcast_short_addr(__le16 addr)
+{
+ return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST));
+}
+
+/**
+ * ieee802154_is_unspec_short_addr - check if short addr is unspecified
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_unspec_short_addr(__le16 addr)
+{
+ return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC));
+}
+
+/**
+ * ieee802154_is_valid_src_short_addr - check if source short address is valid
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_valid_src_short_addr(__le16 addr)
+{
+ return !(ieee802154_is_broadcast_short_addr(addr) ||
+ ieee802154_is_unspec_short_addr(addr));
+}
+
+/**
* ieee802154_random_extended_addr - generates a random extended address
* @addr: extended addr pointer to place the random address
*/
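The new short-address helpers simply exclude the broadcast (0xffff) and unspecified (0xfffe) values. A tiny sketch of an equivalent check, assuming the source address has already been extracted in little-endian form; src_short_addr_usable() is a hypothetical name.

#include <linux/ieee802154.h>

/* Open-coded equivalent of ieee802154_is_valid_src_short_addr(). */
static bool src_short_addr_usable(__le16 src)
{
        return !ieee802154_is_broadcast_short_addr(src) &&
               !ieee802154_is_unspec_short_addr(src);
}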
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f672..548fd535fd02 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 2ec3ad58e8a0..70a5164f4728 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -83,10 +83,12 @@ struct iio_buffer_access_funcs {
* @access: [DRIVER] buffer access functions associated with the
* implementation.
* @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @buffer_group: [INTERN] attributes of the buffer group
* @scan_el_group: [DRIVER] attribute group for those attributes not
* created from the iio_chan_info array.
* @pollq: [INTERN] wait queue to allow for polling on the buffer.
* @stufftoread: [INTERN] flag to indicate new data.
+ * @attrs: [INTERN] standard attributes of the buffer
* @demux_list: [INTERN] list of operations required to demux the scan.
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
* @buffer_list: [INTERN] entry in the devices list of current buffers.
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 6670c3d25c58..d029ffac0d69 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -37,6 +37,7 @@
#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20
#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07
#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3
+#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 7
@@ -121,6 +122,9 @@ struct st_sensor_bdu {
* @mask_int2: mask to enable/disable IRQ on INT2 pin.
* @addr_ihl: address to enable/disable active low on the INT lines.
* @mask_ihl: mask to enable/disable active low on the INT lines.
+ * @addr_od: address to enable/disable Open Drain on the INT lines.
+ * @mask_od: mask to enable/disable Open Drain on the INT lines.
+ * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
@@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq {
u8 mask_int2;
u8 addr_ihl;
u8 mask_ihl;
+ u8 addr_od;
+ u8 mask_od;
+ u8 addr_stat_drdy;
struct {
u8 en_addr;
u8 en_mask;
@@ -212,6 +219,7 @@ struct st_sensor_settings {
* @odr: Output data rate of the sensor [Hz].
* num_data_channels: Number of data channels used in buffer.
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * @int_pin_open_drain: Set the interrupt/DRDY to open drain.
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
@@ -233,6 +241,7 @@ struct st_sensor_data {
unsigned int num_data_channels;
u8 drdy_int_pin;
+ bool int_pin_open_drain;
unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index fad58671c49e..3d672f72e7ec 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev,
void iio_channel_release(struct iio_channel *chan);
/**
+ * devm_iio_channel_get() - Resource managed version of iio_channel_get().
+ * @dev: Pointer to consumer device. Device name must match
+ * the name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns a pointer encoding a negative errno if it is not able to get the
+ * iio channel, otherwise returns a valid iio channel pointer.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_iio_channel_get(struct device *dev,
+ const char *consumer_channel);
+/**
+ * devm_iio_channel_release() - Resource managed version of
+ * iio_channel_release().
+ * @dev: Pointer to consumer device for which resource
+ * is allocated.
+ * @chan: The channel to be released.
+ */
+void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
+
+/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev);
*/
void iio_channel_release_all(struct iio_channel *chan);
+/**
+ * devm_iio_channel_get_all() - Resource managed version of
+ * iio_channel_get_all().
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a pointer encoding a negative errno if it is not able to get the iio
+ * channels, otherwise returns an array of iio_channel structures
+ * terminated with one whose iio_dev pointer is NULL.
+ *
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ *
+ * The allocated iio channels are automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_iio_channel_get_all(struct device *dev);
+
+/**
+ * devm_iio_channel_release_all() - Resource managed version of
+ * iio_channel_release_all().
+ * @dev: Pointer to consumer device for which resource
+ * is allocated.
+ * @chan: Array of channels to be released.
+ */
+void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
+
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b2b16772c651..7c29cb0124ae 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
}
/**
+ * struct iio_mount_matrix - iio mounting matrix
+ * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
+ * main hardware
+ */
+struct iio_mount_matrix {
+ const char *rotation[9];
+};
+
+ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
+ const struct iio_chan_spec *chan, char *buf);
+int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
+ struct iio_mount_matrix *matrix);
+
+typedef const struct iio_mount_matrix *
+ (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
+
+/**
+ * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
+ * @_shared: Whether the attribute is shared between all channels
+ * @_get: Pointer to an iio_get_mount_matrix_t accessor
+ */
+#define IIO_MOUNT_MATRIX(_shared, _get) \
+{ \
+ .name = "mount_matrix", \
+ .shared = (_shared), \
+ .read = iio_show_mount_matrix, \
+ .private = (uintptr_t)(_get), \
+}
+
+/**
* struct iio_event_spec - specification for a channel event
* @type: Type of the event
* @dir: Direction of the event
@@ -527,6 +558,8 @@ void iio_device_unregister(struct iio_dev *indio_dev);
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
+void iio_device_release_direct_mode(struct iio_dev *indio_dev);
extern struct bus_type iio_bus_type;
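iio_device_claim_direct_mode()/iio_device_release_direct_mode() give a driver a race-free way to refuse raw accesses while buffered capture is active. A minimal read_raw sketch using that pair follows; my_read_hw() and the surrounding driver plumbing are placeholders, not part of the API.

#include <linux/errno.h>
#include <linux/iio/iio.h>

/* Placeholder for a real register read; returns an IIO_VAL_* code. */
static int my_read_hw(struct iio_dev *indio_dev,
                      struct iio_chan_spec const *chan, int *val)
{
        *val = 0;
        return IIO_VAL_INT;
}

static int my_read_raw(struct iio_dev *indio_dev,
                       struct iio_chan_spec const *chan,
                       int *val, int *val2, long mask)
{
        int ret;

        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                ret = iio_device_claim_direct_mode(indio_dev);
                if (ret)
                        return ret;     /* typically -EBUSY while buffering */
                ret = my_read_hw(indio_dev, chan, val);
                iio_device_release_direct_mode(indio_dev);
                return ret;
        default:
                return -EINVAL;
        }
}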
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index fa2d01ef8f55..360da7d18a3d 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -41,6 +41,7 @@ struct adis_data {
unsigned int diag_stat_reg;
unsigned int self_test_mask;
+ bool self_test_no_autoclear;
unsigned int startup_delay;
const char * const *status_error_msgs;
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
new file mode 100644
index 000000000000..c8400959d197
--- /dev/null
+++ b/include/linux/iio/magnetometer/ak8975.h
@@ -0,0 +1,16 @@
+#ifndef __IIO_MAGNETOMETER_AK8975_H__
+#define __IIO_MAGNETOMETER_AK8975_H__
+
+#include <linux/iio/iio.h>
+
+/**
+ * struct ak8975_platform_data - AK8975 magnetometer driver platform data
+ * @eoc_gpio: data ready event gpio
+ * @orientation: mounting matrix relative to main hardware
+ */
+struct ak8975_platform_data {
+ int eoc_gpio;
+ struct iio_mount_matrix orientation;
+};
+
+#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index e6516cbbe9bf..0eb7c2e7f0d6 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -21,6 +21,7 @@ extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
+extern void ima_post_path_mknod(struct dentry *dentry);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -54,6 +55,11 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
return 0;
}
+static inline void ima_post_path_mknod(struct dentry *dentry)
+{
+ return;
+}
+
#endif /* CONFIG_IMA */
#ifdef CONFIG_IMA_APPRAISE
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 11d7e840d913..defcc4644ce3 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
writel(val, addr);
}
+static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ high = readl_relaxed(p + 1);
+ low = readl_relaxed(p);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed(val >> 32, addr + 4);
+ writel_relaxed(val, addr);
+}
+
#ifndef readq
#define readq hi_lo_readq
#endif
@@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
#define writeq hi_lo_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed hi_lo_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed hi_lo_writeq_relaxed
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 1a4315f97360..084461a4e5ab 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
writel(val >> 32, addr + 4);
}
+static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ low = readl_relaxed(p);
+ high = readl_relaxed(p + 1);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed(val, addr);
+ writel_relaxed(val >> 32, addr + 4);
+}
+
#ifndef readq
#define readq lo_hi_readq
#endif
@@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
#define writeq lo_hi_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed lo_hi_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed lo_hi_writeq_relaxed
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
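With this change both io-64-nonatomic headers also provide _relaxed fallbacks, so platforms without a native 64-bit accessor get readq_relaxed()/writeq_relaxed() built from two 32-bit accesses (low word first in this header, high word first in the hi-lo one). A short usage sketch, assuming a mapped register block and a hypothetical 64-bit counter register at offset 0x10:

#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>        /* supplies readq_relaxed() if the arch lacks it */

#define MYDEV_COUNTER   0x10    /* hypothetical 64-bit register offset */

static u64 mydev_read_counter(void __iomem *base)
{
        /*
         * Native readq_relaxed() where available; otherwise two
         * readl_relaxed() calls, low word first then high word.
         */
        return readq_relaxed(base + MYDEV_COUNTER);
}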
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ef7a6ecd8584..664683aedcce 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -30,6 +30,7 @@
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
+#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
struct iommu_ops;
struct iommu_group;
@@ -78,6 +79,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -155,8 +157,7 @@ struct iommu_dm_region {
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
- * @pgsize_bitmap: bitmap of supported page sizes
- * @priv: per-instance data private to the iommu driver
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -198,7 +199,6 @@ struct iommu_ops {
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
unsigned long pgsize_bitmap;
- void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 0b65543dc6cf..6230064d7f95 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -26,6 +26,9 @@ struct resource {
/*
* IO resources have these defined flags.
+ *
+ * PCI devices expose these flags to userspace in the "resource" sysfs file,
+ * so don't move them.
*/
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
@@ -110,6 +113,7 @@ struct resource {
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */
/*
* I/O Resource Descriptors
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 92f7177db2ce..f27bb2c62fca 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -19,8 +19,21 @@
/* iova structure */
struct iova {
struct rb_node node;
- unsigned long pfn_hi; /* IOMMU dish out addr hi */
- unsigned long pfn_lo; /* IOMMU dish out addr lo */
+ unsigned long pfn_hi; /* Highest allocated pfn */
+ unsigned long pfn_lo; /* Lowest allocated pfn */
+};
+
+struct iova_magazine;
+struct iova_cpu_rcache;
+
+#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
+#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
+
+struct iova_rcache {
+ spinlock_t lock;
+ unsigned long depot_size;
+ struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+ struct iova_cpu_rcache __percpu *cpu_rcaches;
};
/* holds all the iova translations for a domain */
@@ -31,6 +44,7 @@ struct iova_domain {
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
+ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
};
static inline unsigned long iova_size(struct iova *iova)
@@ -78,6 +92,10 @@ void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn,
bool size_aligned);
+void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
+ unsigned long size);
+unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
@@ -87,5 +105,6 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7edc14fb66b6..5c91b0b055d4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -63,7 +63,8 @@ struct ipv6_devconf {
} stable_secret;
__s32 use_oif_addrs_only;
__s32 keep_addr_on_down;
- void *sysctl;
+
+ struct ctl_table_header *sysctl_header;
};
struct ipv6_params {
@@ -117,14 +118,29 @@ struct inet6_skb_parm {
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
+#define IP6SKB_L3SLAVE 64
};
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+ return flags & IP6SKB_L3SLAVE;
+}
+#else
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+ return false;
+}
+#endif
+
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
static inline int inet6_iif(const struct sk_buff *skb)
{
- return IP6CB(skb)->iif;
+ bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+
+ return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
struct tcp6_request_sock {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c4de62348ff2..4d758a7c604a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
}
extern int irq_set_percpu_devid(unsigned int irq);
+extern int irq_set_percpu_devid_partition(unsigned int irq,
+ const struct cpumask *affinity);
+extern int irq_get_percpu_devid_partition(unsigned int irq,
+ struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 1551b5b2f4c2..f0f5d2671509 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -34,7 +34,7 @@ struct irq_bypass_consumer;
/**
* struct irq_bypass_producer - IRQ bypass producer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -60,7 +60,7 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
new file mode 100644
index 000000000000..c647b0547bcd
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/irqchip/arm-gic-common.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ GIC_V2,
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* Virtual control interface */
+ struct resource vctrl;
+};
+
+const struct gic_kvm_info *gic_get_kvm_info(void);
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index d5d798b35c1f..dc493e0f0ff7 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -102,8 +102,6 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
-#define GICR_ISACTIVER GICD_ISACTIVER
-#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -275,6 +273,12 @@
#define ICH_LR_ACTIVE_BIT (1ULL << 63)
#define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT 48
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID (0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT (10)
+#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
@@ -301,12 +305,12 @@
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_SGI_ID_SHIFT 24
-#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
-#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
-#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
#include <asm/arch_gicv3.h>
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9c940263ca23..fd051855539b 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -33,6 +33,7 @@
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
+#define GIC_DIST_IIDR 0x008
#define GIC_DIST_IGROUP 0x080
#define GIC_DIST_ENABLE_SET 0x100
#define GIC_DIST_ENABLE_CLEAR 0x180
@@ -76,6 +77,7 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PRIORITY_SHIFT 23
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
new file mode 100644
index 000000000000..87433a5d1285
--- /dev/null
+++ b/include/linux/irqchip/irq-partition-percpu.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fwnode.h>
+#include <linux/cpumask.h>
+#include <linux/irqdomain.h>
+
+struct partition_affinity {
+ cpumask_t mask;
+ void *partition_id;
+};
+
+struct partition_desc;
+
+#ifdef CONFIG_PARTITION_PERCPU
+int partition_translate_id(struct partition_desc *desc, void *partition_id);
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops);
+struct irq_domain *partition_get_domain(struct partition_desc *dsc);
+#else
+static inline int partition_translate_id(struct partition_desc *desc,
+ void *partition_id)
+{
+ return -EINVAL;
+}
+
+static inline
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops)
+{
+ return NULL;
+}
+
+static inline
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+ return NULL;
+}
+#endif
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 80f89e4a29ac..81f930b0bca9 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -103,6 +103,7 @@
#define GIC_VPE_SWINT0_MAP_OFS 0x0054
#define GIC_VPE_SWINT1_MAP_OFS 0x0058
#define GIC_VPE_OTHER_ADDR_OFS 0x0080
+#define GIC_VP_IDENT_OFS 0x0088
#define GIC_VPE_WD_CONFIG0_OFS 0x0090
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
@@ -211,6 +212,10 @@
#define GIC_VPE_SMASK_FDC_SHF 6
#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
+/* GIC_VP_IDENT fields */
+#define GIC_VP_IDENT_VCNUM_SHF 0
+#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
+
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
#define GIC_CPU_INT1 1 /* . */
@@ -278,4 +283,16 @@ static inline int gic_get_usm_range(struct resource *gic_usm_res)
#endif /* CONFIG_MIPS_GIC */
+/**
+ * gic_read_local_vp_id() - read the local VP's VCNUM
+ *
+ * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
+ * return it to the caller. This ID should be used to refer to the VP
+ * via the GIC's VP-other region, or when calculating an offset to a
+ * bit representing the VP in interrupt masks.
+ *
+ * Return: The VCNUM value for the local VP.
+ */
+extern unsigned gic_read_local_vp_id(void);
+
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dcca77c4b9d2..b51beebf9804 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -66,6 +66,7 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
+ const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 2aed04396210..f1f36e04d885 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -96,6 +96,8 @@ enum irq_domain_bus_token {
struct irq_domain_ops {
int (*match)(struct irq_domain *d, struct device_node *node,
enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -211,7 +213,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -227,6 +229,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
return fwnode && fwnode->type == FWNODE_IRQCHIP;
}
+static inline
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
+{
+ struct irq_fwspec fwspec = {
+ .fwnode = fwnode,
+ };
+
+ return irq_find_matching_fwspec(&fwspec, bus_token);
+}
+
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
@@ -346,9 +359,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
/* IPI functions */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
- const struct cpumask *dest);
-void irq_destroy_ipi(unsigned int irq);
+int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
diff --git a/include/linux/isa.h b/include/linux/isa.h
index b0270e3814c8..5ab85281230b 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -36,4 +36,36 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+/**
+ * module_isa_driver() - Helper macro for registering an ISA driver
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ *
+ * Helper macro for ISA drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate code. Each module may only
+ * use this macro once, and calling it replaces module_init and module_exit.
+ */
+#define module_isa_driver(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init); \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit);
+
+/**
+ * max_num_isa_dev() - Maximum possible number of ISA devices that can be registered
+ * @__isa_dev_ext: ISA device address extent
+ *
+ * The highest base address possible for an ISA device is 0x3FF; this results in
+ * 1024 possible base addresses. Dividing the number of possible base addresses
+ * by the address extent taken by each device results in the maximum number of
+ * devices on a system.
+ */
+#define max_num_isa_dev(__isa_dev_ext) (1024 / __isa_dev_ext)
+
#endif /* __LINUX_ISA_H */
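module_isa_driver() expands to the usual module_init()/module_exit() boilerplate around isa_register_driver(), and max_num_isa_dev() divides the 1024 possible base addresses by the per-device extent. A sketch combining the two, with a hypothetical my_isa_driver that claims 8 ports per device:

#include <linux/isa.h>
#include <linux/module.h>

#define MYDEV_EXTENT    8       /* hypothetical I/O ports per device */

static int mydev_match(struct device *dev, unsigned int id)
{
        return 1;       /* a real driver probes the hardware here */
}

static struct isa_driver my_isa_driver = {
        .match  = mydev_match,
        .driver = {
                .name = "mydev",
        },
};

/* Registers up to 1024 / 8 = 128 possible device slots. */
module_isa_driver(my_isa_driver, max_num_isa_dev(MYDEV_EXTENT));

MODULE_LICENSE("GPL");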
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 548d55395488..10923d730486 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -64,6 +64,12 @@ enum iscsi_boot_initiator_properties_enum {
ISCSI_BOOT_INI_END_MARKER,
};
+enum iscsi_boot_acpitbl_properties_enum {
+ ISCSI_BOOT_ACPITBL_SIGNATURE,
+ ISCSI_BOOT_ACPITBL_OEM_ID,
+ ISCSI_BOOT_ACPITBL_OEM_TABLE_ID,
+};
+
struct attribute_group;
struct iscsi_boot_kobj {
@@ -127,6 +133,13 @@ iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
umode_t (*is_visible) (void *data, int type),
void (*release) (void *data));
+struct iscsi_boot_kobj *
+iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show)(void *data, int type, char *buf),
+ umode_t (*is_visible)(void *data, int type),
+ void (*release)(void *data));
+
struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index fd1083c46c61..efb232c5f668 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -403,11 +403,19 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
/* Flags in jbd_inode->i_flags */
#define __JI_COMMIT_RUNNING 0
-/* Commit of the inode data in progress. We use this flag to protect us from
+#define __JI_WRITE_DATA 1
+#define __JI_WAIT_DATA 2
+
+/*
+ * Commit of the inode data in progress. We use this flag to protect us from
* concurrent deletion of inode. We cannot use reference to inode for this
* since we cannot afford doing last iput() on behalf of kjournald
*/
#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
+/* Write allocated dirty buffers in this inode before commit */
+#define JI_WRITE_DATA (1 << __JI_WRITE_DATA)
+/* Wait for outstanding data writes for this inode before commit */
+#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
* struct jbd_inode is the structure linking inodes in ordered mode
@@ -781,9 +789,6 @@ jbd2_time_diff(unsigned long start, unsigned long end)
* @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
* number that will fit in j_blocksize
* @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_history: Buffer storing the transactions statistics history
- * @j_history_max: Maximum number of transactions in the statistics history
- * @j_history_cur: Current number of transactions in the statistics history
* @j_history_lock: Protect the transactions statistics history
* @j_proc_entry: procfs entry for the jbd statistics directory
* @j_stats: Overall statistics
@@ -1270,7 +1275,8 @@ extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
-extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
new file mode 100644
index 000000000000..b7f8aced7870
--- /dev/null
+++ b/include/linux/kasan-checks.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_KASAN_CHECKS_H
+#define _LINUX_KASAN_CHECKS_H
+
+#ifdef CONFIG_KASAN
+void kasan_check_read(const void *p, unsigned int size);
+void kasan_check_write(const void *p, unsigned int size);
+#else
+static inline void kasan_check_read(const void *p, unsigned int size) { }
+static inline void kasan_check_write(const void *p, unsigned int size) { }
+#endif
+
+#endif
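kasan_check_read()/kasan_check_write() let code that sidesteps normal compiler instrumentation tell KASAN explicitly which bytes it is about to access; with KASAN disabled they compile to nothing. A minimal sketch, where mycopy() is a hypothetical helper and the memcpy() stands in for an uninstrumented copy routine:

#include <linux/kasan-checks.h>
#include <linux/string.h>

/* Hypothetical wrapper; the memcpy() stands in for an uninstrumented copy. */
static void mycopy(void *dst, const void *src, size_t len)
{
        kasan_check_read(src, len);     /* report if any source byte is poisoned */
        kasan_check_write(dst, len);    /* likewise for the destination range */
        memcpy(dst, src, len);
}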
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 737371b56044..611927f5870d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -50,6 +50,8 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
unsigned long *flags);
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_destroy(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -63,7 +65,8 @@ void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
-void kasan_slab_free(struct kmem_cache *s, void *object);
+bool kasan_slab_free(struct kmem_cache *s, void *object);
+void kasan_poison_slab_free(struct kmem_cache *s, void *object);
struct kasan_cache {
int alloc_meta_offset;
@@ -88,6 +91,8 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
unsigned long *flags) {}
+static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
+static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -105,7 +110,11 @@ static inline void kasan_krealloc(const void *object, size_t new_size,
static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags) {}
-static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2f7775e229b0..94aa10ffe156 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -53,6 +53,13 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define u64_to_user_ptr(x) ( \
+{ \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
+} \
+)
+
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
@@ -412,9 +419,9 @@ extern __printf(3, 4)
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3)
+extern __printf(2, 3) __malloc
char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0)
+extern __printf(2, 0) __malloc
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
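u64_to_user_ptr() is meant for UAPI structures that carry user pointers as __u64 fields (as many ioctls do): it type-checks the argument and casts it back to a __user pointer without tripping sparse. A usage sketch, with made-up struct and field names and assuming <linux/uaccess.h> for copy_from_user():

struct example_ioctl_args {
	__u64 data_ptr;	/* user pointer carried as a fixed-size integer */
	__u32 data_len;
	__u32 pad;
};

static int example_copy_payload(const struct example_ioctl_args *args, void *buf)
{
	void __user *uptr = u64_to_user_ptr(args->data_ptr);

	if (copy_from_user(buf, uptr, args->data_len))
		return -EFAULT;
	return 0;
}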
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index c06c44242f39..96356ef012de 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
int (*rmdir)(struct kernfs_node *kn);
int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name);
+ int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+ struct kernfs_root *root);
};
struct kernfs_root {
@@ -177,6 +179,7 @@ struct kernfs_open_file {
/* private fields, do not use outside kernfs proper */
struct mutex mutex;
+ struct mutex prealloc_mutex;
int event;
struct list_head list;
char *prealloc_buf;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2cc643c6e870..e8acb2b43dd9 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -230,8 +230,6 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
void crash_save_cpu(struct pt_regs *regs, int cpu);
void crash_save_vmcoreinfo(void);
-void crash_map_reserved_pages(void);
-void crash_unmap_reserved_pages(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
void vmcoreinfo_append_str(const char *fmt, ...);
@@ -317,6 +315,8 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
Elf_Shdr *sechdrs, unsigned int relsec);
int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned int relsec);
+void arch_kexec_protect_crashkres(void);
+void arch_kexec_unprotect_crashkres(void);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7463355a198b..eaee981c5558 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,6 @@ struct key_preparsed_payload {
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
time_t expiry; /* Expiry time of key */
- bool trusted; /* True if key is trusted */
};
typedef int (*request_key_actor_t)(struct key_construction *key,
diff --git a/include/linux/key.h b/include/linux/key.h
index 5f5b1129dc92..722914798f37 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,11 +173,9 @@ struct key {
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
-#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
-#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
-#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 12 /* set if key should not be removed */
+#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -205,6 +203,20 @@ struct key {
};
int reject_error;
};
+
+ /* This is set on a keyring to restrict the addition of a link to a key
+ * to that keyring. If this method isn't provided then it is assumed that the
+ * keyring is open to any addition. It is ignored for non-keyring
+ * keys.
+ *
+ * This is intended for use with rings of trusted keys whereby addition
+ * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION
+ * overrides this, allowing the kernel to add extra keys without
+ * restriction.
+ */
+ int (*restrict_link)(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
};
extern struct key *key_alloc(struct key_type *type,
@@ -212,14 +224,17 @@ extern struct key *key_alloc(struct key_type *type,
kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
- unsigned long flags);
+ unsigned long flags,
+ int (*restrict_link)(struct key *,
+ const struct key_type *,
+ const union key_payload *));
-#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
-#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
-#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
-#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
-#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */
+#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
+#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
+#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
+#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -288,8 +303,15 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
+ int (*restrict_link)(struct key *,
+ const struct key_type *,
+ const union key_payload *),
struct key *dest);
+extern int restrict_link_reject(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+
extern int keyring_clear(struct key *keyring);
extern key_ref_t keyring_search(key_ref_t keyring,
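For context, a restrict_link callback of the shape now taken by key_alloc()/keyring_alloc() could look like the sketch below; it is hypothetical (only restrict_link_reject() is actually added by this patch) and simply refuses anything that is not an asymmetric key.

/* Hypothetical keyring restriction: only asymmetric keys may be linked. */
static int example_restrict_to_asymmetric(struct key *keyring,
					  const struct key_type *type,
					  const union key_payload *payload)
{
	return strcmp(type->name, "asymmetric") ? -EOPNOTSUPP : 0;
}

/* It would then be passed as the new argument, e.g.:
 *	keyring_alloc(".example", uid, gid, cred, perm,
 *		      KEY_ALLOC_NOT_IN_QUOTA,
 *		      example_restrict_to_asymmetric, NULL);
 */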
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5276fe0916fc..1c9c973a7dd9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,10 @@
#include <asm/kvm_host.h>
+#ifndef KVM_MAX_VCPU_ID
+#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#endif
+
/*
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
* in kvm, other bits are visible for userspace which are defined in
@@ -225,6 +229,7 @@ struct kvm_vcpu {
sigset_t sigset;
struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
+ bool valid_wakeup;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
@@ -407,6 +412,8 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
+ struct dentry *debugfs_dentry;
+ struct kvm_stat_data **debugfs_stat_data;
};
#define kvm_err(fmt, ...) \
@@ -447,12 +454,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL;
int i;
- if (id < 0 || id >= KVM_MAX_VCPUS)
+ if (id < 0)
return NULL;
- vcpu = kvm_get_vcpu(kvm, id);
+ if (id < KVM_MAX_VCPUS)
+ vcpu = kvm_get_vcpu(kvm, id);
if (vcpu && vcpu->vcpu_id == id)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -651,6 +659,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -984,6 +993,11 @@ enum kvm_stat_kind {
KVM_STAT_VCPU,
};
+struct kvm_stat_data {
+ int offset;
+ struct kvm *kvm;
+};
+
struct kvm_stats_debugfs_item {
const char *name;
int offset;
@@ -1091,6 +1105,11 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
+ /*
+ * Ensure the rest of the request is published to kvm_check_request's
+ * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
+ */
+ smp_wmb();
set_bit(req, &vcpu->requests);
}
@@ -1098,6 +1117,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
if (test_bit(req, &vcpu->requests)) {
clear_bit(req, &vcpu->requests);
+
+ /*
+ * Ensure the rest of the request is visible to kvm_check_request's
+ * caller. Paired with the smp_wmb in kvm_make_request.
+ */
+ smp_mb__after_atomic();
return true;
} else {
return false;
@@ -1169,6 +1194,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
@@ -1179,4 +1205,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
+/* If we wake up during the poll time, was it a successful poll? */
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+ return vcpu->valid_wakeup;
+}
+
+#else
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+
#endif
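The smp_wmb()/smp_mb__after_atomic() pair added to kvm_make_request()/kvm_check_request() is a publish/consume pattern: the producer makes the data a request refers to visible before setting the request bit, and the consumer orders the bit test-and-clear before reading that data. A rough userspace analogue of the same pairing using C11/GCC atomics (a simplified sketch, not KVM code):

#include <stdatomic.h>
#include <stdbool.h>

struct fake_vcpu {
	unsigned long requests;
	int payload;			/* data the request refers to */
};

static void make_request(struct fake_vcpu *v, int req, int data)
{
	v->payload = data;
	/* counterpart of smp_wmb(): publish payload before the bit */
	atomic_thread_fence(memory_order_release);
	__atomic_fetch_or(&v->requests, 1UL << req, __ATOMIC_RELAXED);
}

static bool check_request(struct fake_vcpu *v, int req, int *data)
{
	if (!(__atomic_load_n(&v->requests, __ATOMIC_RELAXED) & (1UL << req)))
		return false;
	__atomic_fetch_and(&v->requests, ~(1UL << req), __ATOMIC_RELAXED);
	/* counterpart of smp_mb__after_atomic(): order the clear before the read */
	atomic_thread_fence(memory_order_acquire);
	*data = v->payload;
	return true;
}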
diff --git a/include/linux/leds.h b/include/linux/leds.h
index f203a8f89d30..d2b13066e781 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -50,6 +50,7 @@ struct led_classdev {
#define LED_SYSFS_DISABLE (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
#define LED_HW_PLUGGABLE (1 << 24)
+#define LED_PANIC_INDICATOR (1 << 25)
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -329,6 +330,12 @@ extern void ledtrig_ide_activity(void);
static inline void ledtrig_ide_activity(void) {}
#endif
+#ifdef CONFIG_LEDS_TRIGGER_MTD
+extern void ledtrig_mtd_activity(void);
+#else
+static inline void ledtrig_mtd_activity(void) {}
+#endif
+
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
extern void ledtrig_flash_ctrl(bool on);
extern void ledtrig_torch_ctrl(bool on);
@@ -358,6 +365,7 @@ struct gpio_led {
unsigned gpio;
unsigned active_low : 1;
unsigned retain_state_suspended : 1;
+ unsigned panic_indicator : 1;
unsigned default_state : 2;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2c4ebef79d0c..d15c19e331d1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -180,6 +180,8 @@ enum {
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
+ ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
+ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -191,7 +193,8 @@ enum {
ATA_DEV_SEMB = 7, /* SEMB */
ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
ATA_DEV_ZAC = 9, /* ZAC device */
- ATA_DEV_NONE = 10, /* no device */
+ ATA_DEV_ZAC_UNSUP = 10, /* ZAC device (unsupported) */
+ ATA_DEV_NONE = 11, /* no device */
/* struct ata_link flags */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
@@ -727,6 +730,13 @@ struct ata_device {
/* NCQ send and receive log subcommand support */
u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE];
+ u8 ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_SIZE];
+
+ /* ZAC zone configuration */
+ u32 zac_zoned_cap;
+ u32 zac_zones_optimal_open;
+ u32 zac_zones_optimal_nonseq;
+ u32 zac_zones_max_open;
/* error history */
int spdn_cnt;
@@ -1523,7 +1533,8 @@ static inline unsigned int ata_class_enabled(unsigned int class)
static inline unsigned int ata_class_disabled(unsigned int class)
{
return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP ||
- class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP;
+ class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP ||
+ class == ATA_DEV_ZAC_UNSUP;
}
static inline unsigned int ata_class_absent(unsigned int class)
@@ -1641,6 +1652,26 @@ static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
ATA_LOG_NCQ_SEND_RECV_DSM_TRIM);
}
+static inline bool ata_fpdma_read_log_supported(struct ata_device *dev)
+{
+ return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+ (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET] &
+ ATA_LOG_NCQ_SEND_RECV_RD_LOG_SUPPORTED);
+}
+
+static inline bool ata_fpdma_zac_mgmt_in_supported(struct ata_device *dev)
+{
+ return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+ (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_OFFSET] &
+ ATA_LOG_NCQ_SEND_RECV_ZAC_MGMT_IN_SUPPORTED);
+}
+
+static inline bool ata_fpdma_zac_mgmt_out_supported(struct ata_device *dev)
+{
+ return (dev->ncq_non_data_cmds[ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OFFSET] &
+ ATA_LOG_NCQ_NON_DATA_ZAC_MGMT_OUT);
+}
+
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
qc->tf.ctl |= ATA_NIEN;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 833867b9ddc2..0c3c30cbbea5 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -27,7 +27,7 @@ enum {
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
ND_CMD_MAX_ELEM = 5,
- ND_CMD_MAX_ENVELOPE = 16,
+ ND_CMD_MAX_ENVELOPE = 256,
ND_MAX_MAPPINGS = 32,
/* region flag indicating to direct-map persistent memory by default */
@@ -68,7 +68,7 @@ struct nd_mapping {
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
- unsigned long dsm_mask;
+ unsigned long cmd_mask;
char *provider_name;
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
@@ -130,10 +130,11 @@ struct nd_region *to_nd_region(struct device *dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
+unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
- unsigned long *dsm_mask);
+ unsigned long cmd_mask);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index cdcb2ccbefa8..ef2c7d2e76c4 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -18,7 +18,7 @@ enum {
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (8)
+#define NVM_CH_BITS (7)
struct ppa_addr {
/* Generic structure for all addresses */
@@ -30,8 +30,14 @@ struct ppa_addr {
u64 pl : NVM_PL_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
+ u64 reserved : 1;
} g;
+ struct {
+ u64 line : 63;
+ u64 is_cached : 1;
+ } c;
+
u64 ppa;
};
};
@@ -41,13 +47,11 @@ struct nvm_id;
struct nvm_dev;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
- nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -202,6 +206,7 @@ struct nvm_id {
struct nvm_target {
struct list_head list;
+ struct nvm_dev *dev;
struct nvm_tgt_type *type;
struct gendisk *disk;
};
@@ -232,14 +237,14 @@ struct nvm_rq {
struct ppa_addr *ppa_list;
- void *metadata;
- dma_addr_t dma_metadata;
+ void *meta_list;
+ dma_addr_t dma_meta_list;
struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
- uint16_t nr_pages;
+ uint16_t nr_ppas;
uint16_t flags;
u64 ppa_status; /* ppa media status */
@@ -307,7 +312,6 @@ struct nvm_dev {
struct nvm_dev_ops *ops;
struct list_head devices;
- struct list_head online_targets;
/* Media manager */
struct nvmm_type *mt;
@@ -323,6 +327,8 @@ struct nvm_dev {
int sec_per_pg; /* only sectors for a single page */
int pgs_per_blk;
int blks_per_lun;
+ int fpg_size;
+ int pfpg_size; /* size of buffer if all pages are to be read */
int sec_size;
int oob_size;
int mccap;
@@ -345,10 +351,9 @@ struct nvm_dev {
unsigned long total_blocks;
unsigned long total_secs;
int nr_luns;
- unsigned max_pages_per_blk;
unsigned long *lun_map;
- void *ppalist_pool;
+ void *dma_pool;
struct nvm_id identity;
@@ -450,8 +455,8 @@ struct nvm_tgt_type {
struct list_head list;
};
-extern int nvm_register_target(struct nvm_tgt_type *);
-extern void nvm_unregister_target(struct nvm_tgt_type *);
+extern int nvm_register_tgt_type(struct nvm_tgt_type *);
+extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
@@ -467,6 +472,7 @@ typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
+typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
@@ -494,6 +500,9 @@ struct nvmm_type {
nvmm_submit_io_fn *submit_io;
nvmm_erase_blk_fn *erase_blk;
+ /* Bad block mgmt */
+ nvmm_mark_blk_fn *mark_blk;
+
/* Configuration management */
nvmm_get_lun_fn *get_lun;
nvmm_reserve_lun *reserve_lun;
@@ -527,13 +536,17 @@ extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
- struct ppa_addr *, int);
+ struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);
+extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
+ int, void *, int);
+extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
+extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
@@ -554,6 +567,13 @@ extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_dev_factory(struct nvm_dev *, int flags);
+
+#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \
+ for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \
+ (chid)++, (ppa).g.ch = (chid)) \
+ for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \
+ (lunid)++, (ppa).g.lun = (lunid))
+
#else /* CONFIG_NVM */
struct nvm_dev_ops;
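The new nvm_for_each_lun_ppa() iterator walks a ppa_addr over every (channel, LUN) pair of a device, filling in the generic-format fields as it goes. A hedged sketch of pairing it with the also-new nvm_get_bb_tbl() to scan per-LUN bad-block tables (buffer sizing and error handling kept minimal):

static int example_scan_bb_tbls(struct nvm_dev *dev, u8 *blks)
{
	struct ppa_addr ppa;
	int ch, lun, ret;

	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
		/* blks is assumed large enough for one LUN's table */
		ret = nvm_get_bb_tbl(dev, ppa, blks);
		if (ret)
			return ret;
		/* ... consume this LUN's bad-block table here ... */
	}
	return 0;
}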
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index bd830d590465..a93a0b23dc8d 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -65,27 +65,8 @@ struct klp_func {
};
/**
- * struct klp_reloc - relocation structure for live patching
- * @loc: address where the relocation will be written
- * @sympos: position in kallsyms to disambiguate symbols (optional)
- * @type: ELF relocation type
- * @name: name of the referenced symbol (for lookup/verification)
- * @addend: offset from the referenced symbol
- * @external: symbol is either exported or within the live patch module itself
- */
-struct klp_reloc {
- unsigned long loc;
- unsigned long sympos;
- unsigned long type;
- const char *name;
- int addend;
- int external;
-};
-
-/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
- * @relocs: relocation entries to be applied at load time
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
* @mod: kernel module associated with the patched object
@@ -95,7 +76,6 @@ struct klp_reloc {
struct klp_object {
/* external */
const char *name;
- struct klp_reloc *relocs;
struct klp_func *funcs;
/* internal */
@@ -124,10 +104,12 @@ struct klp_patch {
};
#define klp_for_each_object(patch, obj) \
- for (obj = patch->objs; obj->funcs; obj++)
+ for (obj = patch->objs; obj->funcs || obj->name; obj++)
#define klp_for_each_func(obj, func) \
- for (func = obj->funcs; func->old_name; func++)
+ for (func = obj->funcs; \
+ func->old_name || func->new_func || func->old_sympos; \
+ func++)
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
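The relaxed termination conditions above let an entry be a valid object even with no functions (name only) and a valid function even when identified by new_func or old_sympos rather than old_name; both loops still stop at an all-zero sentinel. A walk over a patch therefore keeps its usual shape (sketch, assuming a populated struct klp_patch):

static void example_dump_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	klp_for_each_object(patch, obj) {
		pr_info("object: %s\n", obj->name ? obj->name : "vmlinux");
		klp_for_each_func(obj, func)
			pr_info("  func: %s\n", func->old_name);
	}
}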
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..eabe0138eb06 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
- u8 irq_context;
- u8 depth;
- u16 base;
+ /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ unsigned int irq_context : 2,
+ depth : 6,
+ base : 24;
+ /* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
@@ -354,8 +356,13 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-extern void lock_pin_lock(struct lockdep_map *lock);
-extern void lock_unpin_lock(struct lockdep_map *lock);
+struct pin_cookie { unsigned int val; };
+
+#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
+
+extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
+extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
+extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
@@ -371,8 +378,9 @@ extern void lock_unpin_lock(struct lockdep_map *lock);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
-#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
-#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
+#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
#else /* !CONFIG_LOCKDEP */
@@ -425,8 +433,13 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
-#define lockdep_pin_lock(l) do { (void)(l); } while (0)
-#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+struct pin_cookie { };
+
+#define NIL_COOKIE (struct pin_cookie){ }
+
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#endif /* !LOCKDEP */
@@ -444,6 +457,18 @@ do { \
lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+({ \
+ int ____err = 0; \
+ if (!try(_lock)) { \
+ lock_contended(&(_lock)->dep_map, _RET_IP_); \
+ ____err = lock(_lock); \
+ } \
+ if (!____err) \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
+ ____err; \
+})
+
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +477,9 @@ do { \
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+ lock(_lock)
+
#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP
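lockdep_pin_lock() now returns a struct pin_cookie that has to be handed back to the matching repin/unpin, so lockdep can detect an unpin that does not pair with the original pin. A sketch of the calling pattern, with a hypothetical lock wrapper standing in for anything that has a dep_map:

/* 'struct example_lock' is a stand-in for any lock type with a dep_map. */
static void example_pinned_section(struct example_lock *lock)
{
	struct pin_cookie cookie;

	cookie = lockdep_pin_lock(lock);	/* lock must already be held */

	/* ... code that must not release 'lock' runs here ... */

	lockdep_unpin_lock(lock, cookie);	/* cookie proves the pairing */
}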
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index cdee11cbcdf1..7ae397669d8b 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1190,7 +1190,8 @@
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
- * struct timespec and timezone are defined in include/linux/time.h
+ * struct timespec64 is defined in include/linux/time64.h and timezone
+ * is defined in include/linux/time.h
* @ts contains new time
* @tz contains new timezone
* Return 0 if permission is granted.
@@ -1327,7 +1328,7 @@ union security_list_options {
int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
int (*quota_on)(struct dentry *dentry);
int (*syslog)(int type);
- int (*settime)(const struct timespec *ts, const struct timezone *tz);
+ int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
int (*vm_enough_memory)(struct mm_struct *mm, long pages);
int (*bprm_set_creds)(struct linux_binprm *bprm);
@@ -1343,10 +1344,10 @@ union security_list_options {
int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, struct path *path,
+ int (*sb_mount)(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data);
int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
+ int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
int (*sb_set_mnt_opts)(struct super_block *sb,
struct security_mnt_opts *opts,
unsigned long kern_flags,
@@ -1360,23 +1361,23 @@ union security_list_options {
#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(struct path *dir, struct dentry *dentry,
+ int (*path_unlink)(const struct path *dir, struct dentry *dentry);
+ int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
umode_t mode);
- int (*path_rmdir)(struct path *dir, struct dentry *dentry);
- int (*path_mknod)(struct path *dir, struct dentry *dentry,
+ int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
+ int (*path_mknod)(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev);
- int (*path_truncate)(struct path *path);
- int (*path_symlink)(struct path *dir, struct dentry *dentry,
+ int (*path_truncate)(const struct path *path);
+ int (*path_symlink)(const struct path *dir, struct dentry *dentry,
const char *old_name);
- int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
+ int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry);
- int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir,
+ int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir,
struct dentry *new_dentry);
- int (*path_chmod)(struct path *path, umode_t mode);
- int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(struct path *path);
+ int (*path_chmod)(const struct path *path, umode_t mode);
+ int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
+ int (*path_chroot)(const struct path *path);
#endif
int (*inode_alloc_security)(struct inode *inode);
@@ -1804,7 +1805,6 @@ struct security_hook_heads {
struct list_head tun_dev_attach_queue;
struct list_head tun_dev_attach;
struct list_head tun_dev_open;
- struct list_head skb_owned_by;
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
struct list_head xfrm_policy_alloc_security;
@@ -1893,5 +1893,10 @@ extern void __init yama_add_hooks(void);
#else
static inline void __init yama_add_hooks(void) { }
#endif
+#ifdef CONFIG_SECURITY_LOADPIN
+void __init loadpin_add_hooks(void);
+#else
+static inline void loadpin_add_hooks(void) { }
+#endif
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index ed06e15a36aa..ead13d233a97 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -15,22 +15,30 @@
#include <linux/device.h>
#include <linux/irqreturn.h>
+#define CHAMELEON_FILENAME_LEN 12
+
struct mcb_driver;
struct mcb_device;
/**
* struct mcb_bus - MEN Chameleon Bus
*
- * @dev: pointer to carrier device
- * @children: the child busses
+ * @dev: bus device
+ * @carrier: pointer to carrier device
* @bus_nr: mcb bus number
* @get_irq: callback to get IRQ number
+ * @revision: the FPGA's revision number
+ * @model: the FPGA's model number
+ * @name: the FPGA's name
*/
struct mcb_bus {
- struct list_head children;
struct device dev;
struct device *carrier;
int bus_nr;
+ u8 revision;
+ char model;
+ u8 minor;
+ char name[CHAMELEON_FILENAME_LEN + 1];
int (*get_irq)(struct mcb_device *dev);
};
#define to_mcb_bus(b) container_of((b), struct mcb_bus, dev)
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 5bfd99d1a40a..bf9d1d750693 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -13,6 +13,17 @@
struct mii_bus;
+/* Multiple levels of nesting are possible. However, typically this is
+ * limited to a nested DSA-like layer, a MUX layer, and the normal
+ * user. Instead of trying to handle the general case, just define
+ * these cases.
+ */
+enum mdio_mutex_lock_class {
+ MDIO_MUTEX_NORMAL,
+ MDIO_MUTEX_MUX,
+ MDIO_MUTEX_NESTED,
+};
+
struct mdio_device {
struct device dev;
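The mdio_mutex_lock_class values are intended as lockdep subclasses, so a mux or DSA layer can take a child bus's mdio_lock while its parent's is already held without triggering a false deadlock report. A hedged sketch of a nested access (assuming the usual struct mii_bus mdio_lock and read op):

/* Sketch: read a register on a bus reached through a mux whose parent
 * bus lock is already held, using a distinct lockdep subclass. */
static int example_nested_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
	ret = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}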
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1191d79aa495..a805474df4ab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -415,25 +415,6 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
return mz->lru_size[lru];
}
-static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
-{
- unsigned long inactive_ratio;
- unsigned long inactive;
- unsigned long active;
- unsigned long gb;
-
- inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
- active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
-
- gb = (inactive + active) >> (30 - PAGE_SHIFT);
- if (gb)
- inactive_ratio = int_sqrt(10 * gb);
- else
- inactive_ratio = 1;
-
- return inactive * inactive_ratio < active;
-}
-
void mem_cgroup_handle_over_high(void);
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
@@ -646,24 +627,12 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return true;
}
-static inline bool
-mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
-{
- return true;
-}
-
static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
-static inline void
-mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int increment)
-{
-}
-
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index adbef586e696..5145620ba48a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -182,7 +182,7 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
@@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
#else
-static inline int is_mem_section_removable(unsigned long pfn,
+static inline bool is_mem_section_removable(unsigned long pfn,
unsigned long nr_pages)
{
- return 0;
+ return false;
}
static inline void try_offline_node(int nid) {}
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 2696c1f05ed1..4429d255c8ab 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
+static inline bool vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return 0;
+ return false;
#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if (vma->vm_flags & VM_HUGETLB)
- return 0;
+ return false;
#endif
/*
@@ -190,8 +190,8 @@ static inline int vma_migratable(struct vm_area_struct *vma)
if (vma->vm_file &&
gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
< policy_zone)
- return 0;
- return 1;
+ return false;
+ return true;
}
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
@@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}
+static inline struct mempolicy *
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+{
+ return NULL;
+}
+
#define vma_policy(vma) NULL
static inline int
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 69b6951e8fd2..b1086c936507 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -5,6 +5,7 @@
#define _LINUX_MEMPOOL_H
#include <linux/wait.h>
+#include <linux/compiler.h>
struct kmem_cache;
@@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
+extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
/*
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 8d43e9f2a842..51e6f9414575 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -196,6 +196,7 @@
#define AS3722_LDO3_VSEL_MIN 0x01
#define AS3722_LDO3_VSEL_MAX 0x2D
#define AS3722_LDO3_NUM_VOLT 0x2D
+#define AS3722_LDO6_VSEL_BYPASS 0x3F
#define AS3722_LDO_VSEL_MASK 0x7F
#define AS3722_LDO_VSEL_MIN 0x01
#define AS3722_LDO_VSEL_MAX 0x7F
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index d82e7d51372b..0be4982f08fe 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP809_ID,
NR_AXP20X_VARIANTS,
};
@@ -264,6 +265,29 @@ enum {
AXP22X_REG_ID_MAX,
};
+enum {
+ AXP809_DCDC1 = 0,
+ AXP809_DCDC2,
+ AXP809_DCDC3,
+ AXP809_DCDC4,
+ AXP809_DCDC5,
+ AXP809_DC1SW,
+ AXP809_DC5LDO,
+ AXP809_ALDO1,
+ AXP809_ALDO2,
+ AXP809_ALDO3,
+ AXP809_ELDO1,
+ AXP809_ELDO2,
+ AXP809_ELDO3,
+ AXP809_DLDO1,
+ AXP809_DLDO2,
+ AXP809_RTC_LDO,
+ AXP809_LDO_IO0,
+ AXP809_LDO_IO1,
+ AXP809_SW,
+ AXP809_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -390,6 +414,41 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp809_irqs {
+ AXP809_IRQ_ACIN_OVER_V = 1,
+ AXP809_IRQ_ACIN_PLUGIN,
+ AXP809_IRQ_ACIN_REMOVAL,
+ AXP809_IRQ_VBUS_OVER_V,
+ AXP809_IRQ_VBUS_PLUGIN,
+ AXP809_IRQ_VBUS_REMOVAL,
+ AXP809_IRQ_VBUS_V_LOW,
+ AXP809_IRQ_BATT_PLUGIN,
+ AXP809_IRQ_BATT_REMOVAL,
+ AXP809_IRQ_BATT_ENT_ACT_MODE,
+ AXP809_IRQ_BATT_EXIT_ACT_MODE,
+ AXP809_IRQ_CHARG,
+ AXP809_IRQ_CHARG_DONE,
+ AXP809_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP809_IRQ_BATT_CHG_TEMP_HIGH_END,
+ AXP809_IRQ_BATT_CHG_TEMP_LOW,
+ AXP809_IRQ_BATT_CHG_TEMP_LOW_END,
+ AXP809_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP809_IRQ_BATT_ACT_TEMP_HIGH_END,
+ AXP809_IRQ_BATT_ACT_TEMP_LOW,
+ AXP809_IRQ_BATT_ACT_TEMP_LOW_END,
+ AXP809_IRQ_DIE_TEMP_HIGH,
+ AXP809_IRQ_LOW_PWR_LVL1,
+ AXP809_IRQ_LOW_PWR_LVL2,
+ AXP809_IRQ_TIMER,
+ AXP809_IRQ_PEK_RIS_EDGE,
+ AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_SHORT,
+ AXP809_IRQ_PEK_LONG,
+ AXP809_IRQ_PEK_OVER_OFF,
+ AXP809_IRQ_GPIO1_INPUT,
+ AXP809_IRQ_GPIO0_INPUT,
+};
+
#define AXP288_TS_ADC_H 0x58
#define AXP288_TS_ADC_L 0x59
#define AXP288_GP_ADC_H 0x5a
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index bc6f7e00fb3d..99c0395fe1f9 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
struct irq_domain;
-struct property_set;
+struct property_entry;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -47,7 +47,7 @@ struct mfd_cell {
size_t pdata_size;
/* device properties passed to the sub devices drivers */
- const struct property_set *pset;
+ struct property_entry *properties;
/*
* Device Tree compatible string
@@ -131,4 +131,8 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
extern void mfd_remove_devices(struct device *parent);
+extern int devm_mfd_add_devices(struct device *dev, int id,
+ const struct mfd_cell *cells, int n_devs,
+ struct resource *mem_base,
+ int irq_base, struct irq_domain *irq_domain);
#endif
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index a677c2bd485c..64184d27e3cd 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -50,9 +50,11 @@ enum {
EC_MSG_TX_TRAILER_BYTES,
EC_MSG_RX_PROTO_BYTES = 3,
- /* Max length of messages */
- EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
+ /* Max length of messages for proto 2 */
+ EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
EC_MSG_TX_PROTO_BYTES,
+
+ EC_MAX_MSG_BYTES = 64 * 1024,
};
/*
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
new file mode 100644
index 000000000000..dbbe9a644622
--- /dev/null
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -0,0 +1,55 @@
+/*
+ * Device driver for regulators in hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __HI655X_PMIC_H
+#define __HI655X_PMIC_H
+
+/* Hi655x registers are mapped onto the memory bus with a 4-byte stride */
+#define HI655X_STRIDE 4
+#define HI655X_BUS_ADDR(x) ((x) << 2)
+
+#define HI655X_BITS 8
+
+#define HI655X_NR_IRQ 32
+
+#define HI655X_IRQ_STAT_BASE (0x003 << 2)
+#define HI655X_IRQ_MASK_BASE (0x007 << 2)
+#define HI655X_ANA_IRQM_BASE (0x1b5 << 2)
+#define HI655X_IRQ_ARRAY 4
+#define HI655X_IRQ_MASK 0xFF
+#define HI655X_IRQ_CLR 0xFF
+#define HI655X_VER_REG 0x00
+
+#define PMU_VER_START 0x10
+#define PMU_VER_END 0x38
+
+#define RESERVE_INT BIT(7)
+#define PWRON_D20R_INT BIT(6)
+#define PWRON_D20F_INT BIT(5)
+#define PWRON_D4SR_INT BIT(4)
+#define VSYS_6P0_D200UR_INT BIT(3)
+#define VSYS_UV_D3R_INT BIT(2)
+#define VSYS_2P5_R_INT BIT(1)
+#define OTMP_D1R_INT BIT(0)
+
+struct hi655x_pmic {
+ struct resource *res;
+ struct device *dev;
+ struct regmap *regmap;
+ int gpio;
+ unsigned int ver;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
new file mode 100644
index 000000000000..3ca0af07fc78
--- /dev/null
+++ b/include/linux/mfd/max77620.h
@@ -0,0 +1,346 @@
+/*
+ * Register addresses and bit definitions of MAX77620 and MAX20024
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _MFD_MAX77620_H_
+#define _MFD_MAX77620_H_
+
+#include <linux/types.h>
+
+/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */
+#define MAX77620_REG_CNFGGLBL1 0x00
+#define MAX77620_REG_CNFGGLBL2 0x01
+#define MAX77620_REG_CNFGGLBL3 0x02
+#define MAX77620_REG_CNFG1_32K 0x03
+#define MAX77620_REG_CNFGBBC 0x04
+#define MAX77620_REG_IRQTOP 0x05
+#define MAX77620_REG_INTLBT 0x06
+#define MAX77620_REG_IRQSD 0x07
+#define MAX77620_REG_IRQ_LVL2_L0_7 0x08
+#define MAX77620_REG_IRQ_LVL2_L8 0x09
+#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A
+#define MAX77620_REG_ONOFFIRQ 0x0B
+#define MAX77620_REG_NVERC 0x0C
+#define MAX77620_REG_IRQTOPM 0x0D
+#define MAX77620_REG_INTENLBT 0x0E
+#define MAX77620_REG_IRQMASKSD 0x0F
+#define MAX77620_REG_IRQ_MSK_L0_7 0x10
+#define MAX77620_REG_IRQ_MSK_L8 0x11
+#define MAX77620_REG_ONOFFIRQM 0x12
+#define MAX77620_REG_STATLBT 0x13
+#define MAX77620_REG_STATSD 0x14
+#define MAX77620_REG_ONOFFSTAT 0x15
+
+/* SD and LDO Registers */
+#define MAX77620_REG_SD0 0x16
+#define MAX77620_REG_SD1 0x17
+#define MAX77620_REG_SD2 0x18
+#define MAX77620_REG_SD3 0x19
+#define MAX77620_REG_SD4 0x1A
+#define MAX77620_REG_DVSSD0 0x1B
+#define MAX77620_REG_DVSSD1 0x1C
+#define MAX77620_REG_SD0_CFG 0x1D
+#define MAX77620_REG_SD1_CFG 0x1E
+#define MAX77620_REG_SD2_CFG 0x1F
+#define MAX77620_REG_SD3_CFG 0x20
+#define MAX77620_REG_SD4_CFG 0x21
+#define MAX77620_REG_SD_CFG2 0x22
+#define MAX77620_REG_LDO0_CFG 0x23
+#define MAX77620_REG_LDO0_CFG2 0x24
+#define MAX77620_REG_LDO1_CFG 0x25
+#define MAX77620_REG_LDO1_CFG2 0x26
+#define MAX77620_REG_LDO2_CFG 0x27
+#define MAX77620_REG_LDO2_CFG2 0x28
+#define MAX77620_REG_LDO3_CFG 0x29
+#define MAX77620_REG_LDO3_CFG2 0x2A
+#define MAX77620_REG_LDO4_CFG 0x2B
+#define MAX77620_REG_LDO4_CFG2 0x2C
+#define MAX77620_REG_LDO5_CFG 0x2D
+#define MAX77620_REG_LDO5_CFG2 0x2E
+#define MAX77620_REG_LDO6_CFG 0x2F
+#define MAX77620_REG_LDO6_CFG2 0x30
+#define MAX77620_REG_LDO7_CFG 0x31
+#define MAX77620_REG_LDO7_CFG2 0x32
+#define MAX77620_REG_LDO8_CFG 0x33
+#define MAX77620_REG_LDO8_CFG2 0x34
+#define MAX77620_REG_LDO_CFG3 0x35
+
+#define MAX77620_LDO_SLEW_RATE_MASK 0x1
+
+/* LDO Configuration 3 */
+#define MAX77620_TRACK4_MASK BIT(5)
+#define MAX77620_TRACK4_SHIFT 5
+
+/* Voltage */
+#define MAX77620_SDX_VOLT_MASK 0xFF
+#define MAX77620_SD0_VOLT_MASK 0x3F
+#define MAX77620_SD1_VOLT_MASK 0x7F
+#define MAX77620_LDO_VOLT_MASK 0x3F
+
+#define MAX77620_REG_GPIO0 0x36
+#define MAX77620_REG_GPIO1 0x37
+#define MAX77620_REG_GPIO2 0x38
+#define MAX77620_REG_GPIO3 0x39
+#define MAX77620_REG_GPIO4 0x3A
+#define MAX77620_REG_GPIO5 0x3B
+#define MAX77620_REG_GPIO6 0x3C
+#define MAX77620_REG_GPIO7 0x3D
+#define MAX77620_REG_PUE_GPIO 0x3E
+#define MAX77620_REG_PDE_GPIO 0x3F
+#define MAX77620_REG_AME_GPIO 0x40
+#define MAX77620_REG_ONOFFCNFG1 0x41
+#define MAX77620_REG_ONOFFCNFG2 0x42
+
+/* FPS Registers */
+#define MAX77620_REG_FPS_CFG0 0x43
+#define MAX77620_REG_FPS_CFG1 0x44
+#define MAX77620_REG_FPS_CFG2 0x45
+#define MAX77620_REG_FPS_LDO0 0x46
+#define MAX77620_REG_FPS_LDO1 0x47
+#define MAX77620_REG_FPS_LDO2 0x48
+#define MAX77620_REG_FPS_LDO3 0x49
+#define MAX77620_REG_FPS_LDO4 0x4A
+#define MAX77620_REG_FPS_LDO5 0x4B
+#define MAX77620_REG_FPS_LDO6 0x4C
+#define MAX77620_REG_FPS_LDO7 0x4D
+#define MAX77620_REG_FPS_LDO8 0x4E
+#define MAX77620_REG_FPS_SD0 0x4F
+#define MAX77620_REG_FPS_SD1 0x50
+#define MAX77620_REG_FPS_SD2 0x51
+#define MAX77620_REG_FPS_SD3 0x52
+#define MAX77620_REG_FPS_SD4 0x53
+#define MAX77620_REG_FPS_NONE 0
+
+#define MAX77620_FPS_SRC_MASK 0xC0
+#define MAX77620_FPS_SRC_SHIFT 6
+#define MAX77620_FPS_PU_PERIOD_MASK 0x38
+#define MAX77620_FPS_PU_PERIOD_SHIFT 3
+#define MAX77620_FPS_PD_PERIOD_MASK 0x07
+#define MAX77620_FPS_PD_PERIOD_SHIFT 0
+#define MAX77620_FPS_TIME_PERIOD_MASK 0x38
+#define MAX77620_FPS_TIME_PERIOD_SHIFT 3
+#define MAX77620_FPS_EN_SRC_MASK 0x06
+#define MAX77620_FPS_EN_SRC_SHIFT 1
+#define MAX77620_FPS_ENFPS_SW_MASK 0x01
+#define MAX77620_FPS_ENFPS_SW 0x01
+
+/* Minimum and maximum FPS period time (in microseconds) are
+ * different for MAX77620 and MAX20024.
+ */
+#define MAX77620_FPS_PERIOD_MIN_US 40
+#define MAX20024_FPS_PERIOD_MIN_US 20
+
+#define MAX77620_FPS_PERIOD_MAX_US 2560
+#define MAX20024_FPS_PERIOD_MAX_US 5120
+
+#define MAX77620_REG_FPS_GPIO1 0x54
+#define MAX77620_REG_FPS_GPIO2 0x55
+#define MAX77620_REG_FPS_GPIO3 0x56
+#define MAX77620_REG_FPS_RSO 0x57
+#define MAX77620_REG_CID0 0x58
+#define MAX77620_REG_CID1 0x59
+#define MAX77620_REG_CID2 0x5A
+#define MAX77620_REG_CID3 0x5B
+#define MAX77620_REG_CID4 0x5C
+#define MAX77620_REG_CID5 0x5D
+
+#define MAX77620_REG_DVSSD4 0x5E
+#define MAX20024_REG_MAX_ADD 0x70
+
+#define MAX77620_CID_DIDM_MASK 0xF0
+#define MAX77620_CID_DIDM_SHIFT 4
+
+/* CNCG2SD */
+#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1)
+#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2)
+
+/* Device Identification Metal */
+#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF)
+/* Device Identification OTP */
+#define MAX77620_CID5_DIDO(n) ((n) & 0xF)
+
+/* SD CNFG1 */
+#define MAX77620_SD_SR_MASK 0xC0
+#define MAX77620_SD_SR_SHIFT 6
+#define MAX77620_SD_POWER_MODE_MASK 0x30
+#define MAX77620_SD_POWER_MODE_SHIFT 4
+#define MAX77620_SD_CFG1_ADE_MASK BIT(3)
+#define MAX77620_SD_CFG1_ADE_DISABLE 0
+#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3)
+#define MAX77620_SD_FPWM_MASK 0x04
+#define MAX77620_SD_FPWM_SHIFT 2
+#define MAX77620_SD_FSRADE_MASK 0x01
+#define MAX77620_SD_FSRADE_SHIFT 0
+#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
+#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
+#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
+#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
+#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
+
+/* LDO_CNFG2 */
+#define MAX77620_LDO_POWER_MODE_MASK 0xC0
+#define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
+#define MAX77620_LDO_CFG2_ADE_DISABLE 0
+#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
+#define MAX77620_LDO_CFG2_SS_MASK BIT(0)
+#define MAX77620_LDO_CFG2_SS_FAST BIT(0)
+#define MAX77620_LDO_CFG2_SS_SLOW 0
+
+#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7)
+#define MAX77620_IRQ_TOP_SD_MASK BIT(6)
+#define MAX77620_IRQ_TOP_LDO_MASK BIT(5)
+#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4)
+#define MAX77620_IRQ_TOP_RTC_MASK BIT(3)
+#define MAX77620_IRQ_TOP_32K_MASK BIT(2)
+#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1)
+
+#define MAX77620_IRQ_LBM_MASK BIT(3)
+#define MAX77620_IRQ_TJALRM1_MASK BIT(2)
+#define MAX77620_IRQ_TJALRM2_MASK BIT(1)
+
+#define MAX77620_PWR_I2C_ADDR 0x3c
+#define MAX77620_RTC_I2C_ADDR 0x68
+
+#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0
+#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0
+#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0
+#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4)
+#define MAX77620_CNFG_GPIO_INT_FALLING BIT(4)
+#define MAX77620_CNFG_GPIO_INT_RISING BIT(5)
+#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6)
+
+#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7)
+
+#define MAX77620_CNFG1_32K_OUT0_EN BIT(2)
+
+#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7)
+#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38
+#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3
+#define MAX77620_ONOFFCNFG1_SLPEN BIT(2)
+#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1)
+#define MAX20024_ONOFFCNFG1_CLRSE 0x18
+
+#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7)
+#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6)
+#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5)
+#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2)
+#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0)
+
+#define MAX77620_GLBLM_MASK BIT(0)
+
+#define MAX77620_WDTC_MASK 0x3
+#define MAX77620_WDTOFFC BIT(4)
+#define MAX77620_WDTSLPC BIT(3)
+#define MAX77620_WDTEN BIT(2)
+
+#define MAX77620_TWD_MASK 0x3
+#define MAX77620_TWD_2s 0x0
+#define MAX77620_TWD_16s 0x1
+#define MAX77620_TWD_64s 0x2
+#define MAX77620_TWD_128s 0x3
+
+#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7)
+#define MAX77620_CNFGGLBL1_MPPLD BIT(6)
+#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4))
+#define MAX77620_CNFGGLBL1_LBDAC 0x0E
+#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0)
+
+/* CNFG BBC registers */
+#define MAX77620_CNFGBBC_ENABLE BIT(0)
+#define MAX77620_CNFGBBC_CURRENT_MASK 0x06
+#define MAX77620_CNFGBBC_CURRENT_SHIFT 1
+#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18
+#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3
+#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5)
+#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0
+#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6
+
+#define MAX77620_FPS_COUNT 3
+
+/* Interrupts */
+enum {
+ MAX77620_IRQ_TOP_GLBL, /* Low-Battery */
+ MAX77620_IRQ_TOP_SD, /* SD power fail */
+ MAX77620_IRQ_TOP_LDO, /* LDO power fail */
+ MAX77620_IRQ_TOP_GPIO, /* TOP GPIO internal int to MAX77620 */
+ MAX77620_IRQ_TOP_RTC, /* RTC */
+ MAX77620_IRQ_TOP_32K, /* 32kHz oscillator */
+ MAX77620_IRQ_TOP_ONOFF, /* ON/OFF oscillator */
+ MAX77620_IRQ_LBT_MBATLOW, /* Main battery low */
+ MAX77620_IRQ_LBT_TJALRM1, /* Thermal alarm status, > 120C */
+ MAX77620_IRQ_LBT_TJALRM2, /* Thermal alarm status, > 140C */
+};
+
+/* GPIOs */
+enum {
+ MAX77620_GPIO0,
+ MAX77620_GPIO1,
+ MAX77620_GPIO2,
+ MAX77620_GPIO3,
+ MAX77620_GPIO4,
+ MAX77620_GPIO5,
+ MAX77620_GPIO6,
+ MAX77620_GPIO7,
+ MAX77620_GPIO_NR,
+};
+
+/* FPS Source */
+enum max77620_fps_src {
+ MAX77620_FPS_SRC_0,
+ MAX77620_FPS_SRC_1,
+ MAX77620_FPS_SRC_2,
+ MAX77620_FPS_SRC_NONE,
+ MAX77620_FPS_SRC_DEF,
+};
+
+enum max77620_chip_id {
+ MAX77620,
+ MAX20024,
+};
+
+struct max77620_chip {
+ struct device *dev;
+ struct regmap *rmap;
+
+ int chip_irq;
+ int irq_base;
+
+ /* chip id */
+ enum max77620_chip_id chip_id;
+
+ bool sleep_enable;
+ bool enable_global_lpm;
+ int shutdown_fps_period[MAX77620_FPS_COUNT];
+ int suspend_fps_period[MAX77620_FPS_COUNT];
+
+ struct regmap_irq_chip_data *top_irq_data;
+ struct regmap_irq_chip_data *gpio_irq_data;
+};
+
+#endif /* _MFD_MAX77620_H_ */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 6bc4bcd488ac..5a23dd4df432 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -30,6 +30,9 @@
#define MIN_600_MV 600000
#define MIN_500_MV 500000
+/* Ramp delay in uV/us */
+#define RAMP_DELAY_12_MVUS 12000
+
/* Macros to represent steps for LDO/BUCK */
#define STEP_50_MV 50000
#define STEP_25_MV 25000
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b288965e8101..2c14eeca46f0 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK 0x1F
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 1088149be0c9..40a76b97b7ab 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -16,6 +16,7 @@
#define __LINUX_MFD_SYSCON_H__
#include <linux/err.h>
+#include <linux/errno.h>
struct device_node;
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 9352adc95de6..76f30f940c70 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -38,6 +38,9 @@
/* Exynos5433 specific register definitions */
#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
+#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710)
+#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714)
+#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
#define EXYNOS5_PHY_ENABLE BIT(0)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 238c8db953eb..c8e0164c5423 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -95,6 +95,7 @@
#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0)
#define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30)
+#define IMX6Q_GPR1_PCIE_SW_RST BIT(29)
#define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28)
#define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27)
#define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26)
@@ -447,5 +448,11 @@
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
+#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19)
+#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20)
+#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21)
+#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19)
+#define MCLK_DIR(x) ((x) == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : (x) == 2 ? \
+ IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 05d58ee5e6a7..7a26286db895 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -66,8 +66,8 @@
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
-/* Some controllers don't need to wait 10ms for clock changes */
-#define TMIO_MMC_FAST_CLK_CHG (1 << 3)
+/* Some features are only available or tested on RCar Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2 (1 << 3)
/*
* Some controllers require waiting for the SD bus to become
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8f9fc3d26e6d..8e95cd87cd74 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -134,6 +134,7 @@
#define TWL6040_HFDACENA (1 << 0)
#define TWL6040_HFPGAENA (1 << 1)
#define TWL6040_HFDRVENA (1 << 4)
+#define TWL6040_HFSWENA (1 << 6)
/* VIBCTLL/R (0x18/0x1A) fields */
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 2de565b94d0c..4ee908f5b834 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -923,7 +923,6 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d1f904c8b2cb..80dec87a94f8 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1058,7 +1058,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ if (buf->nbufs == 1)
return buf->direct.buf + offset;
else
return buf->page_list[offset >> PAGE_SHIFT].buf +
@@ -1098,7 +1098,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct);
+ int size);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int size);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index b2c9fada8eac..2be976dd4966 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -53,6 +53,11 @@ struct mlx5_core_cq {
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
int pid;
+ struct {
+ struct list_head list;
+ void (*comp)(struct mlx5_core_cq *);
+ void *priv;
+ } tasklet_ctx;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c9239c..035abdf62cfe 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -59,6 +59,7 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -392,6 +393,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -644,7 +656,9 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[4];
+ u8 outer_l3_tunneled;
+ u8 rsvd0;
+ __be16 wqe_id;
u8 lro_tcppsh_abort_dupack;
u8 lro_min_ttl;
__be16 lro_tcp_win;
@@ -657,7 +671,7 @@ struct mlx5_cqe64 {
__be16 slid;
__be32 flags_rqpn;
u8 hds_ip_ext;
- u8 l4_hdr_type_etc;
+ u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
@@ -671,6 +685,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+struct mlx5_mini_cqe8 {
+ union {
+ __be32 rx_hash_result;
+ struct {
+ __be16 checksum;
+ __be16 rsvd;
+ };
+ struct {
+ __be16 wqe_counter;
+ u8 s_wqe_opcode;
+ u8 reserved;
+ } s_wqe_info;
+ };
+ __be32 byte_cnt;
+};
+
+enum {
+ MLX5_NO_INLINE_DATA,
+ MLX5_INLINE_DATA32_SEG,
+ MLX5_INLINE_DATA64_SEG,
+ MLX5_COMPRESSED,
+};
+
+enum {
+ MLX5_CQE_FORMAT_CSUM = 0x1,
+};
+
+#define MLX5_MINI_CQE_ARRAY_SIZE 8
+
+static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->op_own >> 2) & 0x3;
+}
+
static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
@@ -678,12 +726,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+ return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+ return cqe->outer_l3_tunneled & 0x1;
}
static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_hdr_type_etc & 0x1);
+ return !!(cqe->l4_l3_hdr_type & 0x1);
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -696,6 +754,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+struct mpwrq_cqe_bc {
+ __be16 filler_consumed_strides;
+ __be16 byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+ return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->wqe_counter);
+}
+
enum {
CQE_L4_HDR_TYPE_NONE = 0x0,
CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
@@ -1289,6 +1383,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
@@ -1331,6 +1437,7 @@ enum {
MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+ MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
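/*
 * Example (not part of the patch): two hypothetical receive-path helpers
 * built only on the inlines added above, to show how the new CQE fields are
 * meant to be decoded.  Note also that MLX5_MAX_SGE_RD evaluates to
 * (512 - 16 - 16) / 16 = 30 scatter entries.
 */
#include <linux/mlx5/device.h>

static inline bool demo_cqe_is_compressed(struct mlx5_cqe64 *cqe)
{
        /* op_own[3:2] carries the CQE format; MLX5_COMPRESSED (0x3) means
         * the payload is an array of struct mlx5_mini_cqe8 entries. */
        return mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED;
}

static inline u32 demo_mpwrq_decode(struct mlx5_cqe64 *cqe, u16 *strides,
                                    u16 *stride_ix)
{
        *strides   = mpwrq_get_cqe_consumed_strides(cqe);
        *stride_ix = mpwrq_get_cqe_stride_index(cqe);

        /* A filler CQE only releases strides and carries no packet data. */
        return mpwrq_is_filler_cqe(cqe) ? 0 : mpwrq_get_cqe_byte_cnt(cqe);
}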
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8d3b14..80776d0c52dc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -41,11 +41,17 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
enum {
+ MLX5_RQ_BITMASK_VSD = 1 << 1,
+};
+
+enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
@@ -112,9 +118,12 @@ enum {
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
- MLX5_REG_PMLP = 0, /* TBD */
+ MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PMLP = 0x5002,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MLCR = 0x902b,
};
enum {
@@ -304,6 +313,14 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_eq_tasklet {
+ struct list_head list;
+ struct list_head process_list;
+ struct tasklet_struct task;
+ /* lock on completion tasklet list */
+ spinlock_t lock;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
@@ -317,6 +334,7 @@ struct mlx5_eq {
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
+ struct mlx5_eq_tasklet tasklet_ctx;
};
struct mlx5_core_psv {
@@ -450,6 +468,17 @@ struct mlx5_irq_info {
char name[MLX5_MAX_IRQ_NAME];
};
+struct mlx5_fc_stats {
+ struct list_head list;
+ struct list_head addlist;
+ /* protect addlist add/splice operations */
+ spinlock_t addlist_lock;
+
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned long next_query;
+};
+
struct mlx5_eswitch;
struct mlx5_priv {
@@ -511,6 +540,10 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+
+ struct mlx5_fc_stats fc_stats;
};
enum mlx5_device_state {
@@ -519,8 +552,9 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN,
- MLX5_INTERFACE_STATE_UP,
+ MLX5_INTERFACE_STATE_DOWN = BIT(0),
+ MLX5_INTERFACE_STATE_UP = BIT(1),
+ MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};
enum mlx5_pci_status {
@@ -544,7 +578,7 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
- enum mlx5_interface_state interface_state;
+ unsigned long intf_state;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
unsigned long param);
@@ -552,6 +586,9 @@ struct mlx5_core_dev {
struct mlx5_profile *profile;
atomic_t num_qps;
u32 issi;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
struct mlx5_db {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8dec5508d93d..4b7a107d9c19 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
MLX5_FLOW_NAMESPACE_FDB,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};
struct mlx5_flow_table;
@@ -71,6 +73,7 @@ struct mlx5_flow_destination {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
+ struct mlx5_fc *counter;
};
};
@@ -82,12 +85,19 @@ struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- int max_num_groups);
+ int max_num_groups,
+ u32 level);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
- int num_flow_table_entries);
+ int num_flow_table_entries,
+ u32 level);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ u32 level, u16 vport);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
@@ -113,4 +123,13 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+
#endif
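/*
 * Example (not part of the patch): a minimal sketch of the new flow counter
 * API.  It assumes "dev" is a valid mlx5_core_dev and that mlx5_fc_create()
 * reports failure ERR_PTR-style.  Attaching the counter to a rule would be
 * done with MLX5_FLOW_CONTEXT_ACTION_COUNT and a destination of type
 * MLX5_FLOW_DESTINATION_TYPE_COUNTER (not shown).
 */
#include <linux/err.h>
#include <linux/mlx5/fs.h>

static int demo_flow_counter(struct mlx5_core_dev *dev)
{
        u64 bytes, packets, lastuse;
        struct mlx5_fc *fc;

        fc = mlx5_fc_create(dev, true);         /* true: age/cache the stats */
        if (IS_ERR(fc))
                return PTR_ERR(fc);

        /* ... attach fc to a rule and let traffic flow ... */

        mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
        mlx5_fc_destroy(dev, fc);
        return 0;
}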
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c15b8a864937..9a05cd7e5890 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -202,6 +202,9 @@ enum {
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
};
@@ -265,7 +268,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_at_1[0x2];
+ u8 reserved_at_1[0x1];
+ u8 flow_counter[0x1];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
@@ -513,7 +517,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 max_lso_cap[0x5];
u8 reserved_at_10[0x4];
u8 rss_ind_tbl_cap[0x4];
- u8 reserved_at_18[0x3];
+ u8 reg_umr_sq[0x1];
+ u8 scatter_fcs[0x1];
+ u8 reserved_at_1a[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_at_1c[0x2];
u8 tunnel_statless_gre[0x1];
@@ -648,7 +654,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
- MLX5_WQ_TYPE_STRQ = 0x2,
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
};
enum {
@@ -750,21 +756,25 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
- u8 early_vf_enable;
- u8 reserved_at_1a8[0x2];
+ u8 early_vf_enable[0x1];
+ u8 reserved_at_1a9[0x2];
u8 local_ca_ack_delay[0x5];
- u8 reserved_at_1af[0x6];
+ u8 reserved_at_1af[0x2];
+ u8 ports_check[0x1];
+ u8 reserved_at_1b2[0x1];
+ u8 disable_link_up[0x1];
+ u8 beacon_led[0x1];
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1bf[0x3];
+ u8 reserved_at_1c0[0x3];
u8 log_max_msg[0x5];
- u8 reserved_at_1c7[0x4];
+ u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1cf[0x6];
+ u8 reserved_at_1d0[0x6];
u8 rol_s[0x1];
u8 rol_g[0x1];
- u8 reserved_at_1d7[0x1];
+ u8 reserved_at_1d8[0x1];
u8 wol_s[0x1];
u8 wol_g[0x1];
u8 wol_a[0x1];
@@ -774,47 +784,48 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
- u8 reserved_at_1ef[0xc];
+ u8 reserved_at_1f0[0xc];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
- u8 reserved_at_200[0x3];
+ u8 striding_rq[0x1];
+ u8 reserved_at_201[0x2];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_204[0xa];
+ u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_at_212[0x1];
+ u8 reserved_at_213[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_at_215[0x1];
+ u8 reserved_at_216[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
u8 dct[0x1];
- u8 reserved_at_21a[0x1];
+ u8 reserved_at_21b[0x1];
u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 reserved_at_21e[0x1];
+ u8 reserved_at_21f[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_at_222[0x3];
+ u8 reserved_at_223[0x3];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_at_228[0x1];
+ u8 reserved_at_229[0x1];
u8 scqe_break_moderation[0x1];
- u8 reserved_at_22a[0x1];
+ u8 cq_period_start_from_cqe[0x1];
u8 cd[0x1];
- u8 reserved_at_22c[0x1];
+ u8 reserved_at_22d[0x1];
u8 apm[0x1];
u8 vector_calc[0x1];
- u8 reserved_at_22f[0x1];
+ u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_231[0x4];
+ u8 reserved_at_232[0x4];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -824,104 +835,109 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_at_23f[0xa];
+ u8 reserved_at_240[0xa];
u8 uar_sz[0x6];
- u8 reserved_at_24f[0x8];
+ u8 reserved_at_250[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_at_260[0x1];
+ u8 reserved_at_261[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_262[0x8];
+ u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_26f[0x10];
+ u8 reserved_at_270[0x10];
- u8 reserved_at_27f[0x10];
+ u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_29f[0x10];
+ u8 reserved_at_2a0[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2bf[0x10];
+ u8 reserved_at_2c0[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_at_2df[0x7];
+ u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_at_2ff[0x18];
+ u8 reserved_at_300[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_at_31f[0x3];
+ u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_327[0x3];
+ u8 reserved_at_328[0x3];
u8 log_max_pd[0x5];
- u8 reserved_at_32f[0xb];
+ u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_33f[0x20];
+ u8 reserved_at_340[0x20];
- u8 reserved_at_35f[0x3];
+ u8 reserved_at_360[0x3];
u8 log_max_rq[0x5];
- u8 reserved_at_367[0x3];
+ u8 reserved_at_368[0x3];
u8 log_max_sq[0x5];
- u8 reserved_at_36f[0x3];
+ u8 reserved_at_370[0x3];
u8 log_max_tir[0x5];
- u8 reserved_at_377[0x3];
+ u8 reserved_at_378[0x3];
u8 log_max_tis[0x5];
u8 basic_cyclic_rcv_wqe[0x1];
- u8 reserved_at_380[0x2];
+ u8 reserved_at_381[0x2];
u8 log_max_rmp[0x5];
- u8 reserved_at_387[0x3];
+ u8 reserved_at_388[0x3];
u8 log_max_rqt[0x5];
- u8 reserved_at_38f[0x3];
+ u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_397[0x3];
+ u8 reserved_at_398[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_39f[0x3];
+ u8 reserved_at_3a0[0x3];
u8 log_max_stride_sz_rq[0x5];
- u8 reserved_at_3a7[0x3];
+ u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
- u8 reserved_at_3af[0x3];
+ u8 reserved_at_3b0[0x3];
u8 log_max_stride_sz_sq[0x5];
- u8 reserved_at_3b7[0x3];
+ u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3bf[0x1b];
+ u8 reserved_at_3c0[0x1b];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e0[0xa];
+ u8 reserved_at_3e1[0xa];
u8 log_max_vlan_list[0x5];
- u8 reserved_at_3ef[0x3];
+ u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
- u8 reserved_at_3f7[0x3];
+ u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_3ff[0x80];
+ u8 reserved_at_400[0x80];
- u8 reserved_at_47f[0x3];
+ u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
- u8 reserved_at_487[0x8];
+ u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_at_49f[0x20];
+ u8 reserved_at_4a0[0x20];
u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
- u8 reserved_at_4ff[0x5f];
- u8 cqe_zip[0x1];
- u8 cqe_zip_timeout[0x10];
- u8 cqe_zip_max_num[0x10];
+ u8 reserved_at_500[0x80];
- u8 reserved_at_57f[0x220];
+ u8 reserved_at_580[0x3f];
+ u8 cqe_compression[0x1];
+
+ u8 cqe_compression_timeout[0x10];
+ u8 cqe_compression_max_num[0x10];
+
+ u8 reserved_at_5e0[0x220];
};
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
};
struct mlx5_ifc_dest_format_struct_bits {
@@ -931,6 +947,19 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_flow_counter_list_bits {
+ u8 reserved_at_0[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_20[0x20];
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+ struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
@@ -997,7 +1026,13 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x4e0];
+ u8 reserved_at_120[0x15];
+ u8 log_wqe_num_of_strides[0x3];
+ u8 two_byte_shift_en[0x1];
+ u8 reserved_at_139[0x4];
+ u8 log_wqe_stride_size[0x3];
+
+ u8 reserved_at_140[0x4c0];
struct mlx5_ifc_cmd_pas_bits pas[0];
};
@@ -1990,6 +2025,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+ MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
};
struct mlx5_ifc_flow_context_bits {
@@ -2006,13 +2042,16 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_80[0x8];
u8 destination_list_size[0x18];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x8];
+ u8 flow_counter_list_size[0x18];
+
+ u8 reserved_at_c0[0x140];
struct mlx5_ifc_fte_match_param_bits match_value;
u8 reserved_at_1200[0x600];
- struct mlx5_ifc_dest_format_struct_bits destination[0];
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
};
enum {
@@ -2196,7 +2235,8 @@ struct mlx5_ifc_sqc_bits {
u8 flush_in_error_en[0x1];
u8 reserved_at_4[0x4];
u8 state[0x4];
- u8 reserved_at_c[0x14];
+ u8 reg_umr[0x1];
+ u8 reserved_at_d[0x13];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2244,7 +2284,8 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x2];
+ u8 reserved_at_1[0x1];
+ u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
u8 state[0x4];
@@ -2601,6 +2642,11 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
+enum {
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+};
+
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x4];
@@ -2609,8 +2655,8 @@ struct mlx5_ifc_cqc_bits {
u8 reserved_at_c[0x1];
u8 scqe_break_moderation_en[0x1];
u8 oi[0x1];
- u8 reserved_at_f[0x2];
- u8 cqe_zip_en[0x1];
+ u8 cq_period_mode[0x2];
+ u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
u8 reserved_at_18[0x8];
@@ -2984,7 +3030,11 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -3910,6 +3960,34 @@ struct mlx5_ifc_query_flow_group_in_bits {
u8 reserved_at_e0[0x120];
};
+struct mlx5_ifc_query_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_e0[0x10];
+ u8 flow_counter_id[0x10];
+};
+
struct mlx5_ifc_query_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5178,7 +5256,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5205,7 +5287,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5346,7 +5432,11 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5471,6 +5561,28 @@ struct mlx5_ifc_dealloc_pd_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_create_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5792,7 +5904,11 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5836,7 +5952,11 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -6190,6 +6310,28 @@ struct mlx5_ifc_alloc_pd_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6369,6 +6511,17 @@ struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_mlcr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x20];
+
+ u8 beacon_duration[0x10];
+ u8 reserved_at_40[0x10];
+
+ u8 beacon_remain[0x10];
+};
+
struct mlx5_ifc_ptas_reg_bits {
u8 reserved_at_0[0x20];
@@ -6778,6 +6931,16 @@ struct mlx5_ifc_pamp_reg_bits {
u8 index_data[18][0x10];
};
+struct mlx5_ifc_pcmr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x2e];
+ u8 fcs_cap[0x1];
+ u8 reserved_at_3f[0x1f];
+ u8 fcs_chk[0x1];
+ u8 reserved_at_5f[0x1];
+};
+
struct mlx5_ifc_lane_2_module_mapping_bits {
u8 reserved_at_0[0x6];
u8 rx_lane[0x2];
@@ -7114,6 +7277,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pspa_reg_bits pspa_reg;
struct mlx5_ifc_ptas_reg_bits ptas_reg;
struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
struct mlx5_ifc_pude_reg_bits pude_reg;
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
@@ -7147,7 +7311,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -7178,7 +7346,9 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x20];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
@@ -7244,4 +7414,34 @@ struct mlx5_ifc_qtct_reg_bits {
u8 tclass[0x3];
};
+struct mlx5_ifc_mcia_reg_bits {
+ u8 l[0x1];
+ u8 reserved_at_1[0x7];
+ u8 module[0x8];
+ u8 reserved_at_10[0x8];
+ u8 status[0x8];
+
+ u8 i2c_device_address[0x8];
+ u8 page_number[0x8];
+ u8 device_address[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 size[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 dword_0[0x20];
+ u8 dword_1[0x20];
+ u8 dword_2[0x20];
+ u8 dword_3[0x20];
+ u8 dword_4[0x20];
+ u8 dword_5[0x20];
+ u8 dword_6[0x20];
+ u8 dword_7[0x20];
+ u8 dword_8[0x20];
+ u8 dword_9[0x20];
+ u8 dword_10[0x20];
+ u8 dword_11[0x20];
+};
+
#endif /* MLX5_IFC_H */
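/*
 * Example (not part of the patch): the reserved_at_<bit-offset> names above
 * are bookkeeping for the MLX5_GET()/MLX5_SET() accessors, so renaming them
 * only tracks the corrected bit offsets.  A rough sketch of consuming the
 * new CQE compression bits: "cqc" is assumed to point at the cqc layout
 * inside a CREATE_CQ command buffer, and MLX5_CAP_GEN()/MLX5_CQE_FORMAT_CSUM
 * come from linux/mlx5/device.h.
 */
#include <linux/mlx5/driver.h>

static void demo_enable_cqe_compression(struct mlx5_core_dev *mdev, void *cqc)
{
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return;

        MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
}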
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145abd4eb..9851862c0ec5 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -35,6 +35,24 @@
#include <linux/mlx5/driver.h>
+enum mlx5_beacon_duration {
+ MLX5_BEACON_DURATION_OFF = 0x0,
+ MLX5_BEACON_DURATION_INF = 0xffff,
+};
+
+enum mlx5_module_id {
+ MLX5_MODULE_ID_SFP = 0x3,
+ MLX5_MODULE_ID_QSFP = 0xC,
+ MLX5_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX5_MODULE_ID_QSFP28 = 0x11,
+};
+
+#define MLX5_EEPROM_MAX_BYTES 32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
+#define MLX5_I2C_ADDR_LOW 0x50
+#define MLX5_I2C_ADDR_HIGH 0x51
+#define MLX5_EEPROM_PAGE_LENGTH 256
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -53,10 +71,11 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -84,4 +103,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+
#endif /* __MLX5_PORT_H__ */
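/*
 * Example (not part of the patch): a sketch of reading the identifier byte
 * of a cable/module EEPROM with the new helper.  Reads are limited to
 * MLX5_EEPROM_MAX_BYTES per call; the exact return convention (0 versus the
 * number of bytes read) should be checked against the core implementation,
 * so only the sign of the return value is used here.
 */
#include <linux/mlx5/port.h>

static int demo_read_module_id(struct mlx5_core_dev *dev, u8 *id)
{
        u8 buf[MLX5_EEPROM_MAX_BYTES];
        int ret;

        ret = mlx5_query_module_eeprom(dev, 0, sizeof(buf), buf);
        if (ret < 0)
                return ret;

        /* Byte 0 identifies the module, e.g. MLX5_MODULE_ID_QSFP28. */
        *id = buf[0];
        return 0;
}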
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index cf031a3f16c5..64221027bf1f 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -668,6 +668,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq);
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+ int reset, void *out, int out_size);
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+ u32 *out_of_buffer);
static inline const char *mlx5_qp_type_str(int type)
{
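/*
 * Example (not part of the patch): a minimal sketch of the queue counter
 * helpers declared above -- allocate a counter, read its out-of-buffer drop
 * count, then release it.  Error handling is abbreviated.
 */
#include <linux/mlx5/qp.h>

static int demo_q_counter_oob(struct mlx5_core_dev *dev, u32 *drops)
{
        u16 counter_id;
        int err;

        err = mlx5_core_alloc_q_counter(dev, &counter_id);
        if (err)
                return err;

        /* ... attach counter_id to RQs/QPs through their contexts ... */

        err = mlx5_core_query_out_of_buffer(dev, counter_id, drops);
        mlx5_core_dealloc_q_counter(dev, counter_id);
        return err;
}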
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e6323603..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
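/*
 * Example (not part of the patch): the new NIC vport MTU helpers operate on
 * the vport context rather than the PMTU port register.  A sketch of
 * clamping a requested MTU to the port maximum before applying it;
 * mlx5_query_port_max_mtu() comes from linux/mlx5/port.h and port 1 is
 * assumed.
 */
#include <linux/kernel.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>

static int demo_set_vport_mtu(struct mlx5_core_dev *mdev, u16 want)
{
        u16 max_mtu;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
        return mlx5_modify_nic_vport_mtu(mdev, min_t(u16, want, max_mtu));
}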
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a55e5be0894f..5df5feb49575 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -72,6 +72,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
+#ifndef page_to_virt
+#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
@@ -299,6 +303,12 @@ struct vm_fault {
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
+ void *entry; /* ->fault handler can alternatively
+ * return locked DAX entry. In that
+ * case handler should return
+ * VM_FAULT_DAX_LOCKED and fill in
+ * entry here.
+ */
/* for ->map_pages() only */
pgoff_t max_pgoff; /* map pages for offset from pgoff till
* max_pgoff inclusive */
@@ -443,14 +453,14 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline int is_vmalloc_addr(const void *x)
+static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
- return 0;
+ return false;
#endif
}
#ifdef CONFIG_MMU
@@ -471,8 +481,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page)
static inline int compound_mapcount(struct page *page)
{
- if (!PageCompound(page))
- return 0;
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
page = compound_head(page);
return atomic_read(compound_mapcount_ptr(page)) + 1;
}
@@ -500,11 +509,20 @@ static inline int page_mapcount(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
return page_mapcount(page);
}
+static inline int page_trans_huge_mapcount(struct page *page,
+ int *total_mapcount)
+{
+ int mapcount = page_mapcount(page);
+ if (total_mapcount)
+ *total_mapcount = mapcount;
+ return mapcount;
+}
#endif
static inline struct page *virt_to_head_page(const void *x)
@@ -584,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
}
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
- struct page *page, pte_t *pte, bool write, bool anon);
+ struct page *page, pte_t *pte, bool write, bool anon, bool old);
#endif
/*
@@ -721,7 +739,7 @@ static inline void get_page(struct page *page)
page = compound_head(page);
/*
* Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_count.
+ * requires to already have an elevated page->_refcount.
*/
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
@@ -837,10 +855,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
- int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
-
- page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
- page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+ page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
@@ -948,7 +963,7 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
static __always_inline void *lowmem_page_address(const struct page *page)
{
- return __va(PFN_PHYS(page_to_pfn(page)));
+ return page_to_virt(page);
}
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -1019,24 +1034,7 @@ static inline pgoff_t page_file_index(struct page *page)
return page->index;
}
-/*
- * Return true if this page is mapped into pagetables.
- * For compound page it returns true if any subpage of compound page is mapped.
- */
-static inline bool page_mapped(struct page *page)
-{
- int i;
- if (likely(!PageCompound(page)))
- return atomic_read(&page->_mapcount) >= 0;
- page = compound_head(page);
- if (atomic_read(compound_mapcount_ptr(page)) >= 0)
- return true;
- for (i = 0; i < hpage_nr_pages(page); i++) {
- if (atomic_read(&page[i]._mapcount) >= 0)
- return true;
- }
- return false;
-}
+bool page_mapped(struct page *page);
/*
* Return true only if the page has been allocated with
@@ -1084,6 +1082,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
+#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
@@ -1138,6 +1137,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
@@ -1769,7 +1770,7 @@ extern void free_highmem_page(struct page *page);
extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);
-extern void reserve_bootmem_region(unsigned long start, unsigned long end);
+extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
@@ -2017,9 +2018,9 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
/* These take the mm semaphore themselves */
-extern unsigned long vm_brk(unsigned long, unsigned long);
+extern int __must_check vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
-extern unsigned long vm_mmap(struct file *, unsigned long,
+extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -2392,6 +2393,9 @@ static inline bool page_is_guard(struct page *page)
return false;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return false;
+
return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 712e8c37a200..5bd29ba4f174 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int nr_pages)
+{
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int nr_pages)
+{
+#ifdef CONFIG_MEMCG
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+#else
+ __update_lru_size(lruvec, lru, nr_pages);
+#endif
+}
+
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- int nr_pages = hpage_nr_pages(page);
- mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ update_lru_size(lruvec, lru, hpage_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
- __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- int nr_pages = hpage_nr_pages(page);
- mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_del(&page->lru);
- __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
+ update_lru_size(lruvec, lru, -hpage_nr_pages(page));
}
/**
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c2d75b4fa86c..ca3e517980a0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -73,9 +74,9 @@ struct page {
unsigned long counters;
#else
/*
- * Keep _count separate from slub cmpxchg_double data.
- * As the rest of the double word is protected by
- * slab_lock but _count is not.
+ * Keep _refcount separate from slub cmpxchg_double
+ * data. As the rest of the double word is protected by
+ * slab_lock but _refcount is not.
*/
unsigned counters;
#endif
@@ -97,7 +98,11 @@ struct page {
};
int units; /* SLOB */
};
- atomic_t _count; /* Usage count, see below. */
+ /*
+ * Usage count, *USE WRAPPER FUNCTION*
+ * when manual accounting. See page_ref.h
+ */
+ atomic_t _refcount;
};
unsigned int active; /* SLAB */
};
@@ -248,7 +253,7 @@ struct page_frag_cache {
__u32 offset;
#endif
/* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_count every time we allocate a fragment.
+ * containing page->_refcount every time we allocate a fragment.
*/
unsigned int pagecnt_bias;
bool pfmemalloc;
@@ -509,6 +514,9 @@ struct mm_struct {
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
+#ifdef CONFIG_MMU
+ struct work_struct async_put_work;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 7b41c6db1bb6..f7ed271a1d54 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -36,7 +36,6 @@ enum {
EVENT_XFER_COMPLETE,
EVENT_DATA_COMPLETE,
EVENT_DATA_ERROR,
- EVENT_XFER_ERROR
};
struct mmc_data;
@@ -55,6 +54,7 @@ struct dw_mci_dma_slave {
/**
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
* @regs: Pointer to MMIO registers.
* @fifo_reg: Pointer to MMIO registers for data FIFO
* @sg: Scatterlist entry currently being processed by PIO code, if any.
@@ -65,6 +65,9 @@ struct dw_mci_dma_slave {
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
+ * @stop_abort: The command currently prepared for stopping transfer.
+ * @prev_blksz: The former transfer blksz record.
+ * @timing: Record of current ios timing.
* @use_dma: Whether DMA channel is initialized or not.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
@@ -72,7 +75,10 @@ struct dw_mci_dma_slave {
* @sg_cpu: Virtual address of DMA buffer.
* @dma_ops: Pointer to platform-specific DMA callbacks.
* @cmd_status: Snapshot of SR taken upon completion of the current
+ * @ring_size: Buffer size for idma descriptors.
* command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
* @data_status: Snapshot of SR taken upon completion of the current
* data transfer. Only valid when EVENT_DATA_COMPLETE or
* EVENT_DATA_ERROR is pending.
@@ -80,7 +86,6 @@ struct dw_mci_dma_slave {
* to be sent.
* @dir_status: Direction of current transfer.
* @tasklet: Tasklet running the request state machine.
- * @card_tasklet: Tasklet handling card detect.
* @pending_events: Bitmask of events flagged by the interrupt handler
* to be processed by the tasklet.
* @completed_events: Bitmask of events which the state machine has
@@ -91,6 +96,7 @@ struct dw_mci_dma_slave {
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
* @num_slots: Number of slots available.
+ * @fifoth_val: The value of FIFOTH register.
* @verid: Denote Version ID.
* @dev: Device associated with the MMC controller.
* @pdata: Platform data associated with the MMC controller.
@@ -107,9 +113,11 @@ struct dw_mci_dma_slave {
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
* @quirks: Set of quirks that apply to specific versions of the IP.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
* @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
* @dto_timer: Timer for broken data transfer over scheme.
*
* Locking
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8dd4d290ab0d..45cde8cd39f2 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,28 +93,39 @@ struct mmc_host_ops {
void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
bool is_first_req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
+
+ /*
+ * Avoid calling the next three functions too often or in a "fast
+ * path", since underlaying controller might implement them in an
+ * expensive and/or slow way. Also note that these functions might
+ * sleep, so don't call them in the atomic contexts!
+ */
+
+ /*
+ * Notes to the set_ios callback:
+ * ios->clock might be 0. For some controllers, setting 0Hz
+ * as any other frequency works. However, some controllers
+ * explicitly need to disable the clock. Otherwise e.g. voltage
+ * switching might fail because the SDCLK is not really quiet.
+ */
+ void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
+
/*
- * Avoid calling these three functions too often or in a "fast path",
- * since underlaying controller might implement them in an expensive
- * and/or slow way.
- *
- * Also note that these functions might sleep, so don't call them
- * in the atomic contexts!
- *
* Return values for the get_ro callback should be:
* 0 for a read/write card
* 1 for a read-only card
* -ENOSYS when not supported (equal to NULL callback)
* or a negative errno value when something bad happened
- *
+ */
+ int (*get_ro)(struct mmc_host *host);
+
+ /*
* Return values for the get_cd callback should be:
* 0 for a absent card
* 1 for a present card
* -ENOSYS when not supported (equal to NULL callback)
* or a negative errno value when something bad happened
*/
- void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
- int (*get_ro)(struct mmc_host *host);
int (*get_cd)(struct mmc_host *host);
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
@@ -318,6 +329,7 @@ struct mmc_host {
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
+ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -515,4 +527,7 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
#endif /* LINUX_MMC_HOST_H */
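/*
 * Example (not part of the patch): hypothetical host driver callbacks that
 * follow the get_ro/get_cd return conventions documented above.  "struct
 * demo_host" and its fields are illustrative; mmc_priv() is the standard
 * accessor for the driver-private area of struct mmc_host.
 */
#include <linux/mmc/host.h>

struct demo_host {
        bool wp_asserted;       /* write-protect switch state */
        bool has_cd;            /* card-detect line wired up? */
        bool card_present;
};

static int demo_get_ro(struct mmc_host *host)
{
        struct demo_host *priv = mmc_priv(host);

        return priv->wp_asserted ? 1 : 0;       /* 1 = read-only card */
}

static int demo_get_cd(struct mmc_host *host)
{
        struct demo_host *priv = mmc_priv(host);

        if (!priv->has_cd)
                return -ENOSYS;                 /* same as a NULL callback */
        return priv->card_present ? 1 : 0;
}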
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 83430f2ea757..0d126aeb3ec0 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
deleted file mode 100644
index 95d6f0314a7d..000000000000
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef LINUX_MMC_SH_MOBILE_SDHI_H
-#define LINUX_MMC_SH_MOBILE_SDHI_H
-
-#include <linux/types.h>
-
-#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
-#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
-#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-
-#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
deleted file mode 100644
index 5f5cd80e9765..000000000000
--- a/include/linux/mmc/tmio.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * include/linux/mmc/tmio.h
- *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
- * Copyright (C) 2007 Ian Molton
- * Copyright (C) 2004 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- */
-#ifndef LINUX_MMC_TMIO_H
-#define LINUX_MMC_TMIO_H
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_STATUS2 0x1e
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_SDIO_STATUS 0x36
-#define CTL_SDIO_IRQ_MASK 0x38
-#define CTL_DMA_ENABLE 0xd8
-#define CTL_RESET_SD 0xe0
-#define CTL_VERSION 0xe2
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND 0x00000001
-#define TMIO_STAT_DATAEND 0x00000004
-#define TMIO_STAT_CARD_REMOVE 0x00000008
-#define TMIO_STAT_CARD_INSERT 0x00000010
-#define TMIO_STAT_SIGSTATE 0x00000020
-#define TMIO_STAT_WRPROTECT 0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A 0x00000400
-#define TMIO_STAT_CMD_IDX_ERR 0x00010000
-#define TMIO_STAT_CRCFAIL 0x00020000
-#define TMIO_STAT_STOPBIT_ERR 0x00040000
-#define TMIO_STAT_DATATIMEOUT 0x00080000
-#define TMIO_STAT_RXOVERFLOW 0x00100000
-#define TMIO_STAT_TXUNDERRUN 0x00200000
-#define TMIO_STAT_CMDTIMEOUT 0x00400000
-#define TMIO_STAT_RXRDY 0x01000000
-#define TMIO_STAT_TXRQ 0x02000000
-#define TMIO_STAT_ILL_FUNC 0x20000000
-#define TMIO_STAT_CMD_BUSY 0x40000000
-#define TMIO_STAT_ILL_ACCESS 0x80000000
-
-#define CLK_CTL_DIV_MASK 0xff
-#define CLK_CTL_SCLKEN BIT(8)
-
-#define TMIO_BBS 512 /* Boot block size */
-
-#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 70fffeba7495..a4441784503b 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,9 +1,16 @@
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H
+#include <asm/mmu_context.h>
+
struct mm_struct;
void use_mm(struct mm_struct *mm);
void unuse_mm(struct mm_struct *mm);
+/* Architectures that care about IRQ state in switch_mm can override this. */
+#ifndef switch_mm_irqs_off
+# define switch_mm_irqs_off switch_mm
+#endif
+
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c60df9257cc7..02069c23486d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -85,13 +85,6 @@ extern int page_group_by_mobility_disabled;
get_pfnblock_flags_mask(page, page_to_pfn(page), \
PB_migrate_end, MIGRATETYPE_MASK)
-static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
-{
- BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
- return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
- MIGRATETYPE_MASK);
-}
-
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -746,8 +739,12 @@ static inline bool is_dev_zone(const struct zone *zone)
extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+ int classzone_idx, unsigned int alloc_flags,
+ long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx, int alloc_flags);
+ unsigned long mark, int classzone_idx,
+ unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
enum memmap_context {
@@ -828,10 +825,7 @@ static inline int is_highmem_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
- int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
- return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
- (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
- zone_movable_is_highmem());
+ return is_highmem_idx(zone_idx(zone));
#else
return 0;
#endif
@@ -922,6 +916,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
#endif /* CONFIG_NUMA */
}
+struct zoneref *__next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes);
+
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
* @z - The cursor used as a starting point for the search
@@ -934,9 +932,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
*/
-struct zoneref *next_zones_zonelist(struct zoneref *z,
+static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
- nodemask_t *nodes);
+ nodemask_t *nodes)
+{
+ if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
+ return z;
+ return __next_zones_zonelist(z, highest_zoneidx, nodes);
+}
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -952,13 +955,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
- nodemask_t *nodes,
- struct zone **zone)
+ nodemask_t *nodes)
{
- struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+ return next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
- *zone = zonelist_zone(z);
- return z;
}
/**
@@ -973,10 +973,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
* within a given nodemask
*/
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
- for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+ for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
+ zone; \
+ z = next_zones_zonelist(++z, highidx, nodemask), \
+ zone = zonelist_zone(z))
+
+#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+ for (zone = z->zone; \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
- zone = zonelist_zone(z)) \
+ zone = zonelist_zone(z))
+
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
@@ -1056,7 +1063,7 @@ struct mem_section {
unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
/*
- * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
+ * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
* section. (see page_ext.h about this.)
*/
struct page_ext *page_ext;
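/*
 * Example (not part of the patch): first_zones_zonelist() no longer returns
 * the zone through a "struct zone **" argument; callers read it from the
 * returned zoneref with zonelist_zone().  A sketch of the updated pattern,
 * with the zonelist/highidx/nodemask arguments assumed to be set up by the
 * caller.
 */
#include <linux/mmzone.h>

static struct zone *demo_first_allowed_zone(struct zonelist *zonelist,
                                            enum zone_type highidx,
                                            nodemask_t *nodemask)
{
        struct zoneref *z = first_zones_zonelist(zonelist, highidx, nodemask);

        return zonelist_zone(z);        /* NULL if no zone qualifies */
}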
diff --git a/include/linux/module.h b/include/linux/module.h
index 2bb0c3085706..3daf2b3a09d2 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -330,6 +330,15 @@ struct mod_kallsyms {
char *strtab;
};
+#ifdef CONFIG_LIVEPATCH
+struct klp_modinfo {
+ Elf_Ehdr hdr;
+ Elf_Shdr *sechdrs;
+ char *secstrings;
+ unsigned int symndx;
+};
+#endif
+
struct module {
enum module_state state;
@@ -456,7 +465,11 @@ struct module {
#endif
#ifdef CONFIG_LIVEPATCH
+ bool klp; /* Is this a livepatch module? */
bool klp_alive;
+
+ /* Elf information */
+ struct klp_modinfo *klp_info;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -630,6 +643,18 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
+#ifdef CONFIG_LIVEPATCH
+static inline bool is_livepatch_module(struct module *mod)
+{
+ return mod->klp;
+}
+#else /* !CONFIG_LIVEPATCH */
+static inline bool is_livepatch_module(struct module *mod)
+{
+ return false;
+}
+#endif /* CONFIG_LIVEPATCH */
+
#else /* !CONFIG_MODULES... */
/* Given an address, look for it in the exception tables. */
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index c8be32e9fc49..ad3c3488073c 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -103,24 +103,6 @@
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
-/*
- * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
- * and it has to be read consecutively and immediately after the 512
- * byte data block for hardware to generate the error bit offsets
- * Managing the ecc bytes in the following way is easier. This way is
- * similar to oobfree structure maintained already in u-boot nand driver
- */
-#define MAX_ECCPLACE_ENTRIES 32
-
-struct fsmc_nand_eccplace {
- uint8_t offset;
- uint8_t length;
-};
-
-struct fsmc_eccplace {
- struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
-};
-
struct fsmc_nand_timings {
uint8_t tclr;
uint8_t tar;
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 5e0eb7ccabd4..3aa56e3104bb 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -122,18 +122,13 @@
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) map_calc_words(map)
-# else
-# define map_bankwidth(map) 32
-# define map_bankwidth_is_large(map) (1)
-# define map_words(map) map_calc_words(map)
-# endif
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 771272187316..29a170612203 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -96,16 +96,35 @@ struct mtd_oob_ops {
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+/**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
+ */
+struct mtd_oob_region {
+ u32 offset;
+ u32 length;
+};
+
/*
- * Internal ECC layout control structure. For historical reasons, there is a
- * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
- * for export to user-space via the ECCGETLAYOUT ioctl.
- * nand_ecclayout should be expandable in the future simply by the above macros.
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * ECC sections.
+ * @free: function returning a free region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * free sections.
*/
-struct nand_ecclayout {
- __u32 eccbytes;
- __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
- struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
+struct mtd_ooblayout_ops {
+ int (*ecc)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+ int (*free)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
};
struct module; /* only needed for owner field in mtd_info */
@@ -166,8 +185,8 @@ struct mtd_info {
const char *name;
int index;
- /* ECC layout structure pointer - read only! */
- struct nand_ecclayout *ecclayout;
+ /* OOB layout description */
+ const struct mtd_ooblayout_ops *ooblayout;
/* the ecc step size. */
unsigned int ecc_step_size;
@@ -253,6 +272,30 @@ struct mtd_info {
int usecount;
};
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion);
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
+
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
+ const struct mtd_ooblayout_ops *ooblayout)
+{
+ mtd->ooblayout = ooblayout;
+}
+
static inline void mtd_set_of_node(struct mtd_info *mtd,
struct device_node *np)
{
@@ -283,17 +326,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf);
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
-
-static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return mtd->_write_oob(mtd, to, ops);
-}
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf);
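
The mtd.h hunk above replaces the fixed-size nand_ecclayout table with two callbacks that report one mtd_oob_region per section. A minimal sketch of a driver-side layout under the new interface follows; the single-section split, the two reserved bad-block-marker bytes and the eight trailing free bytes are assumptions for illustration, not taken from the patch.

#include <linux/mtd/mtd.h>

static int example_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;	/* only one ECC section in this sketch */

	oobregion->offset = 2;				/* assumed: skip the BBM bytes */
	oobregion->length = mtd->oobsize - 2 - 8;	/* assumed: rest minus the free area */
	return 0;
}

static int example_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = mtd->oobsize - 8;	/* assumed: last 8 bytes stay free */
	oobregion->length = 8;
	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
	.ecc = example_ooblayout_ecc,
	.free = example_ooblayout_free,
};

A driver would then call mtd_set_ooblayout(mtd, &example_ooblayout_ops) from its probe path, and generic code can walk the layout with mtd_ooblayout_ecc()/mtd_ooblayout_free() instead of indexing eccpos[].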
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 56574ba36555..fbe8e164a4ee 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -116,9 +116,14 @@ typedef enum {
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
NAND_ECC_HW_OOB_FIRST,
- NAND_ECC_SOFT_BCH,
} nand_ecc_modes_t;
+enum nand_ecc_algo {
+ NAND_ECC_UNKNOWN,
+ NAND_ECC_HAMMING,
+ NAND_ECC_BCH,
+};
+
/*
* Constants for Hardware ECC
*/
@@ -458,6 +463,7 @@ struct nand_hw_control {
/**
* struct nand_ecc_ctrl - Control structure for ECC
* @mode: ECC mode
+ * @algo: ECC algorithm
* @steps: number of ECC steps per page
* @size: data bytes per ECC step
* @bytes: ECC bytes per step
@@ -466,7 +472,6 @@ struct nand_hw_control {
* @prepad: padding information for syndrome based ECC generators
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
- * @layout: ECC layout control struct pointer
* @priv: pointer to private ECC control data
* @hwctl: function to control hardware ECC generator. Must only
* be provided if a hardware ECC is available
@@ -508,6 +513,7 @@ struct nand_hw_control {
*/
struct nand_ecc_ctrl {
nand_ecc_modes_t mode;
+ enum nand_ecc_algo algo;
int steps;
int size;
int bytes;
@@ -516,7 +522,6 @@ struct nand_ecc_ctrl {
int prepad;
int postpad;
unsigned int options;
- struct nand_ecclayout *layout;
void *priv;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
@@ -740,6 +745,9 @@ struct nand_chip {
void *priv;
};
+extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
+extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
+
static inline void nand_set_flash_node(struct nand_chip *chip,
struct device_node *np)
{
@@ -1070,4 +1078,18 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
int threshold);
+
+/* Default write_oob implementation */
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default write_oob syndrome implementation */
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page);
+
+/* Default read_oob implementation */
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+
+/* Default read_oob syndrome implementation */
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page);
#endif /* __LINUX_MTD_NAND_H */
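
With NAND_ECC_SOFT_BCH dropped from nand_ecc_modes_t and the new algo field added to nand_ecc_ctrl, a driver that used to request software BCH now selects the mode and the algorithm separately. A sketch; the step size and strength values are purely illustrative:

#include <linux/mtd/nand.h>

static void example_setup_soft_bch(struct nand_chip *chip)
{
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_BCH;	/* formerly: chip->ecc.mode = NAND_ECC_SOFT_BCH */
	chip->ecc.size = 512;		/* assumed ECC step size */
	chip->ecc.strength = 8;		/* assumed correction strength */
}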
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4596503c9da9..0aaa98b219a4 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -80,7 +80,6 @@ struct onenand_bufferram {
* @page_buf: [INTERN] page main data buffer
* @oob_buf: [INTERN] page oob data buffer
* @subpagesize: [INTERN] holds the subpagesize
- * @ecclayout: [REPLACEABLE] the default ecc placement scheme
* @bbm: [REPLACEABLE] pointer to Bad Block Management
* @priv: [OPTIONAL] pointer to private chip data
*/
@@ -134,7 +133,6 @@ struct onenand_chip {
#endif
int subpagesize;
- struct nand_ecclayout *ecclayout;
void *bbm;
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index 25f4d2a845c1..65e91d0fa981 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -14,7 +14,7 @@
struct sharpsl_nand_platform_data {
struct nand_bbt_descr *badblock_pattern;
- struct nand_ecclayout *ecc_layout;
+ const struct mtd_ooblayout_ops *ecc_layout;
struct mtd_partition *partitions;
unsigned int nr_partitions;
};
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 3c36113a88e1..7f041bd88b82 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -21,6 +21,7 @@
* Sometimes these are the same as CFI IDs, but sometimes they aren't.
*/
#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
+#define SNOR_MFR_GIGADEVICE 0xc8
#define SNOR_MFR_INTEL CFI_MFR_INTEL
#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 77d01700daf7..d3d0398f2a1b 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_EMPTY 0x4000
+extern int path_pts(struct path *path);
+
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
@@ -79,6 +81,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5489ab756d1a..aee2761d294c 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -15,6 +15,7 @@
#include <linux/fs.h>
#include <linux/ndctl.h>
#include <linux/device.h>
+#include <linux/badblocks.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
@@ -55,13 +56,19 @@ static inline struct nd_namespace_common *to_ndns(struct device *dev)
}
/**
- * struct nd_namespace_io - infrastructure for loading an nd_pmem instance
+ * struct nd_namespace_io - device representation of a persistent memory range
* @dev: namespace device created by the nd region driver
* @res: struct resource conversion of a NFIT SPA table
+ * @size: cached resource_size(@res) for fast path size checks
+ * @addr: virtual address to access the namespace range
+ * @bb: badblocks list for the namespace range
*/
struct nd_namespace_io {
struct nd_namespace_common common;
struct resource res;
+ resource_size_t size;
+ void __pmem *addr;
+ struct badblocks bb;
};
/**
@@ -82,6 +89,7 @@ struct nd_namespace_pmem {
* @uuid: namespace name supplied in the dimm label
* @id: ida allocated id
* @lbasize: blk namespaces have a native sector size when btt not present
+ * @size: sum of all the resource ranges allocated to this namespace
* @num_resources: number of dpa extents to claim
* @res: discontiguous dpa extents for given dimm
*/
@@ -91,6 +99,7 @@ struct nd_namespace_blk {
u8 *uuid;
int id;
unsigned long lbasize;
+ resource_size_t size;
int num_resources;
struct resource **res;
};
diff --git a/include/linux/net.h b/include/linux/net.h
index 49175e4ced11..9aa49a05fe38 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -218,8 +218,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res);
struct socket *sock_alloc(void);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
- int flags);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
struct socket *sock_from_file(struct file *file, int *err);
@@ -246,7 +245,15 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ net_ratelimit()) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
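
The net.h change routes net_dbg_ratelimited() through dynamic debug when CONFIG_DYNAMIC_DEBUG is set, so the message is only formatted and rate-limited once the callsite has been enabled via /sys/kernel/debug/dynamic_debug/control. A sketch of such a callsite; the driver function and message are invented:

#include <linux/net.h>
#include <linux/netdevice.h>

static void example_log_rx_error(struct net_device *dev, int err)
{
	/* silent by default; enable with: echo 'func example_log_rx_error +p' > dynamic_debug/control */
	net_dbg_ratelimited("%s: rx error %d\n", dev->name, err);
}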
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a734bf43d190..aa7b2400f98c 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -39,14 +39,19 @@ enum {
NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
+ NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */
NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
NETIF_F_FSO_BIT, /* ... FCoE segmentation */
NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */
- NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
- NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
+ NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */
+ NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
+ NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4
+ * in hardware and all other
+ * headers in software.
+ */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
@@ -116,10 +121,12 @@ enum {
#define NETIF_F_RXALL __NETIF_F(RXALL)
#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
#define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM)
-#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
-#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
+#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4)
+#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
+#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
@@ -145,10 +152,6 @@ enum {
#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
-/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
- NETIF_F_TSO6 | NETIF_F_UFO)
-
/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
* set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
* this would be contradictory
@@ -156,11 +159,15 @@ enum {
#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
NETIF_F_HW_CSUM)
-#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
NETIF_F_FSO)
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO)
+
/*
* If one device supports one of these features, then enable them
* for all in netdev_increment_features.
@@ -193,8 +200,8 @@ enum {
#define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
- NETIF_F_GSO_IPIP | \
- NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
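
The netdev_features.h hunks rename the IPIP/SIT GSO bits to the combined IPXIP4/IPXIP6 names and introduce NETIF_F_GSO_PARTIAL plus the per-device gso_partial_features mask. A sketch of how a tunnel-capable driver might migrate its advertised features; the exact feature mix is an assumption, not taken from the patch:

#include <linux/netdevice.h>

static void example_set_tunnel_features(struct net_device *dev)
{
	/* was: NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT */
	dev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6;

	/* assumed: inner GRE checksums are finished in software */
	dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM;
	dev->hw_features |= NETIF_F_GSO_PARTIAL;
	dev->features |= dev->hw_features;
}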
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8395308a2445..f45929ce8157 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -106,7 +106,6 @@ enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
- NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
@@ -570,28 +569,27 @@ struct netdev_queue {
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
int numa_node;
#endif
+ unsigned long tx_maxrate;
+ /*
+ * Number of TX timeouts for this queue
+ * (/sys/class/net/DEV/Q/trans_timeout)
+ */
+ unsigned long trans_timeout;
/*
* write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
- * please use this field instead of dev->trans_start
+ * Time (in jiffies) of last Tx
*/
unsigned long trans_start;
- /*
- * Number of TX timeouts for this queue
- * (/sys/class/net/DEV/Q/trans_timeout)
- */
- unsigned long trans_timeout;
-
unsigned long state;
#ifdef CONFIG_BQL
struct dql dql;
#endif
- unsigned long tx_maxrate;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -831,7 +829,6 @@ struct tc_to_netdev {
* the queue before that can happen; it's for obsolete devices and weird
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
- * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
@@ -1548,7 +1545,6 @@ enum netdev_priv_flags {
*
* @offload_fwd_mark: Offload device fwding mark
*
- * @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1586,8 +1582,6 @@ enum netdev_priv_flags {
* @gso_max_size: Maximum size of generic segmentation offload
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
- * @gso_min_segs: Minimum number of segments that can be passed to the
- * NIC for GSO
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1656,6 +1650,7 @@ struct net_device {
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
+ netdev_features_t gso_partial_features;
int ifindex;
int group;
@@ -1798,13 +1793,6 @@ struct net_device {
#endif
/* These may be needed for future network-power-down code. */
-
- /*
- * trans_start here is expensive for high speed devices on SMP,
- * please use netdev_queue->trans_start instead.
- */
- unsigned long trans_start;
-
struct timer_list watchdog_timer;
int __percpu *pcpu_refcnt;
@@ -1858,7 +1846,7 @@ struct net_device {
unsigned int gso_max_size;
#define GSO_MAX_SEGS 65535
u16 gso_max_segs;
- u16 gso_min_segs;
+
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
@@ -2123,7 +2111,10 @@ struct napi_gro_cb {
/* Used in GRE, set in fou/gue_gro_receive */
u8 is_fou:1;
- /* 6 bit hole */
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* 5 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2162,23 +2153,6 @@ struct packet_offload {
struct list_head list;
};
-struct udp_offload;
-
-struct udp_offload_callbacks {
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff);
- int (*gro_complete)(struct sk_buff *skb,
- int nhoff,
- struct udp_offload *uoff);
-};
-
-struct udp_offload {
- __be16 port;
- u8 ipproto;
- struct udp_offload_callbacks callbacks;
-};
-
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
@@ -2259,6 +2233,8 @@ struct netdev_lag_lower_state_info {
#define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
+#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
+#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2750,7 +2726,6 @@ struct softnet_data {
/* stats */
unsigned int processed;
unsigned int time_squeeze;
- unsigned int cpu_collision;
unsigned int received_rps;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
@@ -2763,11 +2738,15 @@ struct softnet_data {
struct sk_buff *completion_queue;
#ifdef CONFIG_RPS
- /* Elements below can be accessed between CPUs for RPS */
+ /* input_queue_head should be written by cpu owning this struct,
+ * and only read by other cpus. Worth using a cache line.
+ */
+ unsigned int input_queue_head ____cacheline_aligned_in_smp;
+
+ /* Elements below can be accessed between CPUs for RPS/RFS */
struct call_single_data csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
- unsigned int input_queue_head;
unsigned int input_queue_tail;
#endif
unsigned int dropped;
@@ -2804,7 +2783,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2854,7 +2833,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
}
}
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -3276,7 +3255,10 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb);
+
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
@@ -3493,6 +3475,15 @@ static inline void txq_trans_update(struct netdev_queue *txq)
txq->trans_start = jiffies;
}
+/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
+static inline void netif_trans_update(struct net_device *dev)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+ if (txq->trans_start != jiffies)
+ txq->trans_start = jiffies;
+}
+
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
@@ -3768,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
-extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -4004,21 +3994,23 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
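
Among the netdevice.h changes, dev->trans_start is removed and legacy single-queue drivers are expected to use the new netif_trans_update() helper. A sketch of an ndo_start_xmit that keeps the Tx watchdog happy; the hardware push is elided and the function name is illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... hand the skb to the hardware here ... */

	netif_trans_update(dev);	/* was: dev->trans_start = jiffies */
	dev_kfree_skb(skb);		/* assumed: the descriptor consumed the data */
	return NETDEV_TX_OK;
}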
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index f48b8a664b0f..83b9a2e0d8d4 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -351,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
nla_put_net64(skb, IPSET_ATTR_SKBMARK,
cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask))) ||
+ skbinfo->skbmarkmask),
+ IPSET_ATTR_PAD)) ||
(skbinfo->skbprio &&
nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
@@ -374,9 +375,11 @@ static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter))) ||
+ cpu_to_be64(ip_set_get_bytes(counter)),
+ IPSET_ATTR_PAD) ||
nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)));
+ cpu_to_be64(ip_set_get_packets(counter)),
+ IPSET_ATTR_PAD);
}
static inline void
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 80a305b85323..dc4f58a3cdcc 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -242,11 +242,18 @@ void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
+int xt_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat);
+
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
@@ -373,16 +380,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
* allows us to return 0 for single core systems without forcing
* callers to deal with SMP vs. NONSMP issues.
*/
-static inline u64 xt_percpu_counter_alloc(void)
+static inline unsigned long xt_percpu_counter_alloc(void)
{
if (nr_cpu_ids > 1) {
void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
sizeof(struct xt_counters));
if (res == NULL)
- return (u64) -ENOMEM;
+ return -ENOMEM;
- return (u64) (__force unsigned long) res;
+ return (__force unsigned long) res;
}
return 0;
@@ -480,7 +487,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
@@ -490,6 +497,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 011433478a14..bfed6b367350 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -50,12 +50,27 @@ struct nfs4_label {
typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
-struct nfs_stateid4 {
- __be32 seqid;
- char other[NFS4_STATEID_OTHER_SIZE];
-} __attribute__ ((packed));
+struct nfs4_stateid_struct {
+ union {
+ char data[NFS4_STATEID_SIZE];
+ struct {
+ __be32 seqid;
+ char other[NFS4_STATEID_OTHER_SIZE];
+ } __attribute__ ((packed));
+ };
+
+ enum {
+ NFS4_INVALID_STATEID_TYPE = 0,
+ NFS4_SPECIAL_STATEID_TYPE,
+ NFS4_OPEN_STATEID_TYPE,
+ NFS4_LOCK_STATEID_TYPE,
+ NFS4_DELEGATION_STATEID_TYPE,
+ NFS4_LAYOUT_STATEID_TYPE,
+ NFS4_PNFS_DS_STATEID_TYPE,
+ } type;
+};
-typedef struct nfs_stateid4 nfs4_stateid;
+typedef struct nfs4_stateid_struct nfs4_stateid;
enum nfs_opnum4 {
OP_ACCESS = 3,
@@ -504,6 +519,7 @@ enum {
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
+ NFSPROC4_CLNT_COPY,
};
/* nfs41 types */
@@ -621,7 +637,9 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_IO_TEST_FAIL,
PNFS_UPDATE_LAYOUT_FOUND_CACHED,
PNFS_UPDATE_LAYOUT_RETURN,
+ PNFS_UPDATE_LAYOUT_RETRY,
PNFS_UPDATE_LAYOUT_BLOCKED,
+ PNFS_UPDATE_LAYOUT_INVALID_OPEN,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
};
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 67300f8e5f2f..d71278c3c5bd 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,11 +163,9 @@ struct nfs_inode {
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Number of in-flight sillydelete RPC calls */
- atomic_t silly_count;
- /* List of deferred sillydelete requests */
- struct hlist_head silly_list;
- wait_queue_head_t waitqueue;
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -445,10 +443,9 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter,
- loff_t pos);
+ struct iov_iter *iter);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
struct iov_iter *iter);
@@ -492,9 +489,6 @@ extern void nfs_release_automount_timer(void);
* linux/fs/nfs/unlink.c
*/
extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
-extern void nfs_wait_on_sillyrename(struct dentry *dentry);
-extern void nfs_block_sillyrename(struct dentry *dentry);
-extern void nfs_unblock_sillyrename(struct dentry *dentry);
/*
* linux/fs/nfs/write.c
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7fcc13c8cf1f..14a762d2734d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -246,5 +246,6 @@ struct nfs_server {
#define NFS_CAP_DEALLOCATE (1U << 21)
#define NFS_CAP_LAYOUTSTATS (1U << 22)
#define NFS_CAP_CLONE (1U << 23)
+#define NFS_CAP_COPY (1U << 24)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d320906cf13e..c304a11b5b1a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,7 +233,6 @@ struct nfs4_layoutget_args {
struct inode *inode;
struct nfs_open_context *ctx;
nfs4_stateid stateid;
- unsigned long timestamp;
struct nfs4_layoutdriver_data layout;
};
@@ -251,7 +250,6 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
- long timeout;
};
struct nfs4_getdeviceinfo_args {
@@ -1343,6 +1341,32 @@ struct nfs42_falloc_res {
const struct nfs_server *falloc_server;
};
+struct nfs42_copy_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *src_fh;
+ nfs4_stateid src_stateid;
+ u64 src_pos;
+
+ struct nfs_fh *dst_fh;
+ nfs4_stateid dst_stateid;
+ u64 dst_pos;
+
+ u64 count;
+};
+
+struct nfs42_write_res {
+ u64 count;
+ struct nfs_writeverf verifier;
+};
+
+struct nfs42_copy_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs42_write_res write_res;
+ bool consecutive;
+ bool synchronous;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1431,7 +1455,7 @@ struct nfs_commit_completion_ops {
};
struct nfs_commit_info {
- spinlock_t *lock; /* inode->i_lock */
+ struct inode *inode; /* Needed for inode->i_lock */
struct nfs_mds_commit_info *mds;
struct pnfs_ds_commit_info *ds;
struct nfs_direct_req *dreq; /* O_DIRECT request */
@@ -1468,10 +1492,10 @@ struct nfs_pgio_completion_ops {
};
struct nfs_unlinkdata {
- struct hlist_node list;
struct nfs_removeargs args;
struct nfs_removeres res;
- struct inode *dir;
+ struct dentry *dentry;
+ wait_queue_head_t wq;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
long timeout;
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index e9fcf90b270d..5988dd57ba66 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -13,12 +13,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Written by Koji Sato <koji@osrg.net>
- * Ryusuke Konishi <ryusuke@osrg.net>
+ * Written by Koji Sato and Ryusuke Konishi.
*/
/*
* linux/include/linux/ext2_fs.h
@@ -132,10 +127,14 @@ struct nilfs_super_root {
#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
-#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
- semantics also for data */
-#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
- mount-time recovery */
+#define NILFS_MOUNT_STRICT_ORDER 0x2000 /*
+ * Apply strict in-order
+ * semantics also for data
+ */
+#define NILFS_MOUNT_NORECOVERY 0x4000 /*
+ * Disable write access during
+ * mount-time recovery
+ */
#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */
@@ -147,16 +146,20 @@ struct nilfs_super_block {
__le16 s_minor_rev_level; /* minor revision level */
__le16 s_magic; /* Magic signature */
- __le16 s_bytes; /* Bytes count of CRC calculation
- for this structure. s_reserved
- is excluded. */
+ __le16 s_bytes; /*
+ * Bytes count of CRC calculation
+ * for this structure. s_reserved
+ * is excluded.
+ */
__le16 s_flags; /* flags */
__le32 s_crc_seed; /* Seed value of CRC calculation */
/*10*/ __le32 s_sum; /* Check sum of super block */
- __le32 s_log_block_size; /* Block size represented as follows
- blocksize =
- 1 << (s_log_block_size + 10) */
+ __le32 s_log_block_size; /*
+ * Block size represented as follows
+ * blocksize =
+ * 1 << (s_log_block_size + 10)
+ */
__le64 s_nsegments; /* Number of segments in filesystem */
/*20*/ __le64 s_dev_size; /* block device size in bytes */
__le64 s_first_data_block; /* 1st seg disk block number */
@@ -168,8 +171,10 @@ struct nilfs_super_block {
__le64 s_last_seq; /* seq. number of seg written last */
/*50*/ __le64 s_free_blocks_count; /* Free blocks count */
- __le64 s_ctime; /* Creation time (execution time of
- newfs) */
+ __le64 s_ctime; /*
+ * Creation time (execution time of
+ * newfs)
+ */
/*60*/ __le64 s_mtime; /* Mount time */
__le64 s_wtime; /* Write time */
/*70*/ __le16 s_mnt_count; /* Mount count */
@@ -193,8 +198,10 @@ struct nilfs_super_block {
/*A8*/ char s_volume_name[80]; /* volume name */
/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
- __le32 s_c_block_max; /* Threshold of data amount for
- the segment construction */
+ __le32 s_c_block_max; /*
+ * Threshold of data amount for
+ * the segment construction
+ */
/*100*/ __le64 s_feature_compat; /* Compatible feature set */
__le64 s_feature_compat_ro; /* Read-only compatible feature set */
__le64 s_feature_incompat; /* Incompatible feature set */
@@ -247,12 +254,18 @@ struct nilfs_super_block {
#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */
-#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in
- a full segment */
-#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in
- a partial segment */
-#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved
- segments */
+#define NILFS_SEG_MIN_BLOCKS 16 /*
+ * Minimum number of blocks in
+ * a full segment
+ */
+#define NILFS_PSEG_MIN_BLOCKS 2 /*
+ * Minimum number of blocks in
+ * a partial segment
+ */
+#define NILFS_MIN_NRSVSEGS 8 /*
+ * Minimum number of reserved
+ * segments
+ */
/*
* We call DAT, cpfile, and sufile root metadata files. Inodes of
@@ -327,9 +340,9 @@ enum {
~NILFS_DIR_ROUND)
#define NILFS_MAX_REC_LEN ((1<<16)-1)
-static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
+static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
{
- unsigned len = le16_to_cpu(dlen);
+ unsigned int len = le16_to_cpu(dlen);
#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == NILFS_MAX_REC_LEN)
@@ -338,7 +351,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
return len;
}
-static inline __le16 nilfs_rec_len_to_disk(unsigned len)
+static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
{
#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == (1 << 16))
@@ -518,9 +531,11 @@ struct nilfs_checkpoint {
__le64 cp_inodes_count;
__le64 cp_blocks_count;
- /* Do not change the byte offset of ifile inode.
- To keep the compatibility of the disk format,
- additional fields should be added behind cp_ifile_inode. */
+ /*
+ * Do not change the byte offset of ifile inode.
+ * To keep the compatibility of the disk format,
+ * additional fields should be added behind cp_ifile_inode.
+ */
struct nilfs_inode cp_ifile_inode;
};
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 167342c2ce6b..0f6f6607f592 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -92,6 +92,8 @@ enum {
IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
+ IEEE802154_ATTR_PAD,
+
__IEEE802154_ATTR_MAX,
};
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 6e85889cf9ab..f746e44d4046 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -43,8 +43,10 @@
*
* int first_node(mask) Number lowest set bit, or MAX_NUMNODES
* int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * or MAX_NUMNODES
* int first_unset_node(mask) First node not set in mask, or
- * MAX_NUMNODES.
+ * MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
* NODE_MASK_ALL Initializer - all bits set
@@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+/*
+ * Find the next present node in src, starting after node n, wrapping around to
+ * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
+ */
+#define next_node_in(n, src) __next_node_in((n), &(src))
+int __next_node_in(int node, const nodemask_t *srcp);
+
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
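
next_node_in() differs from next_node() by wrapping back to the first set node, which makes simple round-robin node selection a one-liner. A sketch; the static cursor and the use of node_online_map are assumptions for illustration:

#include <linux/nodemask.h>
#include <linux/numa.h>

static int example_pick_next_node(void)
{
	static int last_node = NUMA_NO_NODE;

	/* wraps to the first online node; MAX_NUMNODES only if the map is empty */
	last_node = next_node_in(last_node, node_online_map);
	return last_node;
}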
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a55986f6fe38..7d51b2904cb7 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -21,13 +21,13 @@ enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
NVME_REG_VS = 0x0008, /* Version */
NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
- NVME_REG_INTMC = 0x0010, /* Interrupt Mask Set */
+ NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
NVME_REG_CC = 0x0014, /* Controller Configuration */
NVME_REG_CSTS = 0x001c, /* Controller Status */
NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
- NVME_REG_ACQ = 0x0030, /* Admin SQ Base Address */
+ NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
};
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index a4fcc90b0f20..cd93416d762e 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -14,6 +14,10 @@
struct nvmem_device;
struct nvmem_cell_info;
+typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
+ void *val, size_t bytes);
+typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
+ void *val, size_t bytes);
struct nvmem_config {
struct device *dev;
@@ -24,6 +28,12 @@ struct nvmem_config {
int ncells;
bool read_only;
bool root_only;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ int size;
+ int word_size;
+ int stride;
+ void *priv;
/* To be only used by old driver/misc/eeprom drivers */
bool compat;
struct device *base_dev;
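
The nvmem_config additions let a provider supply raw reg_read/reg_write callbacks (plus size, word_size and stride) instead of going through a regmap. A sketch of a RAM-backed provider; the buffer, name and geometry are assumptions:

#include <linux/nvmem-provider.h>
#include <linux/string.h>

static u8 example_storage[256];	/* assumed backing store */

static int example_reg_read(void *priv, unsigned int offset,
			    void *val, size_t bytes)
{
	memcpy(val, example_storage + offset, bytes);
	return 0;
}

static int example_reg_write(void *priv, unsigned int offset,
			     void *val, size_t bytes)
{
	memcpy(example_storage + offset, val, bytes);
	return 0;
}

static struct nvmem_config example_nvmem_config = {
	.name = "example-nvmem",	/* assumed */
	.reg_read = example_reg_read,
	.reg_write = example_reg_write,
	.size = sizeof(example_storage),
	.word_size = 1,
	.stride = 1,
};

The provider is still registered from probe with nvmem_register(&example_nvmem_config); only the access path changes.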
diff --git a/include/linux/of.h b/include/linux/of.h
index 7fcb681baadf..c7292e8ea080 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -75,6 +75,23 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_phandle_iterator {
+ /* Common iterator information */
+ const char *cells_name;
+ int cell_count;
+ const struct device_node *parent;
+
+ /* List size information */
+ const __be32 *list_end;
+ const __be32 *phandle_end;
+
+ /* Current position state */
+ const __be32 *cur;
+ uint32_t cur_count;
+ phandle phandle;
+ struct device_node *node;
+};
+
struct of_reconfig_data {
struct device_node *dn;
struct property *prop;
@@ -133,7 +150,7 @@ void of_core_init(void);
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_OF;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
}
static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
@@ -334,6 +351,18 @@ extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
+/* phandle iterator functions */
+extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
+ const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count);
+
+extern int of_phandle_iterator_next(struct of_phandle_iterator *it);
+extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
+ uint32_t *args,
+ int size);
+
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
@@ -608,6 +637,27 @@ static inline int of_count_phandle_with_args(struct device_node *np,
return -ENOSYS;
}
+static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
+ const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count)
+{
+ return -ENOSYS;
+}
+
+static inline int of_phandle_iterator_next(struct of_phandle_iterator *it)
+{
+ return -ENOSYS;
+}
+
+static inline int of_phandle_iterator_args(struct of_phandle_iterator *it,
+ uint32_t *args,
+ int size)
+{
+ return 0;
+}
+
static inline int of_alias_get_id(struct device_node *np, const char *stem)
{
return -ENOSYS;
@@ -685,6 +735,15 @@ static inline int of_node_to_nid(struct device_node *device)
}
#endif
+#ifdef CONFIG_OF_NUMA
+extern int of_numa_init(void);
+#else
+static inline int of_numa_init(void)
+{
+ return -ENOSYS;
+}
+#endif
+
static inline struct device_node *of_find_matching_node(
struct device_node *from,
const struct of_device_id *matches)
@@ -868,6 +927,12 @@ static inline int of_property_read_s32(const struct device_node *np,
return of_property_read_u32(np, propname, (u32*) out_value);
}
+#define of_for_each_phandle(it, err, np, ln, cn, cc) \
+ for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \
+ err = of_phandle_iterator_next(it); \
+ err == 0; \
+ err = of_phandle_iterator_next(it))
+
#define of_property_for_each_u32(np, propname, prop, p, u) \
for (prop = of_find_property(np, propname, NULL), \
p = of_prop_next_u32(prop, NULL, &u); \
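
The of.h hunks add a phandle iterator plus the of_for_each_phandle() helper so callers no longer have to parse phandle lists index by index. A sketch that walks a clocks-style property; the property and cell names are examples, not mandated by the patch:

#include <linux/of.h>
#include <linux/printk.h>

static void example_walk_phandles(struct device_node *np)
{
	struct of_phandle_iterator it;
	uint32_t args[MAX_PHANDLE_ARGS];
	int err, count;

	of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", 0) {
		count = of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);
		pr_info("entry: phandle %u with %d argument cells\n",
			it.phandle, count);
	}
	/* err is nonzero once the list is exhausted or on a parse error */
}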
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 01c0a556448b..37864734ca50 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -47,10 +47,6 @@ void __iomem *of_io_request_and_map(struct device_node *device,
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
-extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
-extern unsigned long pci_address_to_pio(phys_addr_t addr);
-extern phys_addr_t pci_pio_to_address(unsigned long pio);
-
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
extern struct of_pci_range *of_pci_range_parser_one(
@@ -86,11 +82,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
-static inline phys_addr_t pci_pio_to_address(unsigned long pio)
-{
- return 0;
-}
-
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 2fbe8682a66f..901ec01c9fba 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -37,8 +37,9 @@ extern bool of_fdt_is_big_endian(const void *blob,
unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
-extern void of_fdt_unflatten_tree(const unsigned long *blob,
- struct device_node **mynodes);
+extern void *of_fdt_unflatten_tree(const unsigned long *blob,
+ struct device_node *dad,
+ struct device_node **mynodes);
/* TBD: Temporary export of fdt globals - remove when code fully merged */
extern int __initdata dt_root_addr_cells;
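
of_fdt_unflatten_tree() now takes an optional parent node and returns the memory block that backs the unflattened nodes, which overlay-style callers need to keep around for later freeing. A sketch under those assumptions; the NULL return on failure is assumed, not shown in the hunk:

#include <linux/of_fdt.h>

static struct device_node *example_unflatten(const unsigned long *blob)
{
	struct device_node *root = NULL;
	void *mem;

	/* NULL parent: build a detached tree */
	mem = of_fdt_unflatten_tree(blob, NULL, &root);
	if (!mem)
		return NULL;	/* assumed: unflattening failed */

	return root;
}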
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index f8bcd0e21a26..bb3a5a2cd570 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -15,6 +15,7 @@
#define __LINUX_OF_GRAPH_H
#include <linux/types.h>
+#include <linux/errno.h>
/**
* struct of_endpoint - the OF graph endpoint data structure
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index ffbe4707d4aa..bd02b44902d0 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
size_t *size);
extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np);
#else
@@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
}
static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
return NULL;
@@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
extern struct of_device_id __iommu_of_table;
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
deleted file mode 100644
index e266caa36402..000000000000
--- a/include/linux/of_mtd.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * OF helpers for mtd.
- *
- * This file is released under the GPLv2
- */
-
-#ifndef __LINUX_OF_MTD_H
-#define __LINUX_OF_MTD_H
-
-#ifdef CONFIG_OF_MTD
-
-#include <linux/of.h>
-int of_get_nand_ecc_mode(struct device_node *np);
-int of_get_nand_ecc_step_size(struct device_node *np);
-int of_get_nand_ecc_strength(struct device_node *np);
-int of_get_nand_bus_width(struct device_node *np);
-bool of_get_nand_on_flash_bbt(struct device_node *np);
-
-#else /* CONFIG_OF_MTD */
-
-static inline int of_get_nand_ecc_mode(struct device_node *np)
-{
- return -ENOSYS;
-}
-
-static inline int of_get_nand_ecc_step_size(struct device_node *np)
-{
- return -ENOSYS;
-}
-
-static inline int of_get_nand_ecc_strength(struct device_node *np)
-{
- return -ENOSYS;
-}
-
-static inline int of_get_nand_bus_width(struct device_node *np)
-{
- return -ENOSYS;
-}
-
-static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
- return false;
-}
-
-#endif /* CONFIG_OF_MTD */
-
-#endif /* __LINUX_OF_MTD_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index d833eb4dd446..9e9d79e8efa5 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -7,161 +7,53 @@
* option) any later version.
*/
-/* Maximum Number of Chip Selects */
-#define GPMC_CS_NUM 8
+#include <linux/platform_data/gpmc-omap.h>
#define GPMC_CONFIG_WP 0x00000005
-#define GPMC_IRQ_FIFOEVENTENABLE 0x01
-#define GPMC_IRQ_COUNT_EVENT 0x02
-
-#define GPMC_BURST_4 4 /* 4 word burst */
-#define GPMC_BURST_8 8 /* 8 word burst */
-#define GPMC_BURST_16 16 /* 16 word burst */
-#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
-#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
-#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
-#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
-
-/* bool type time settings */
-struct gpmc_bool_timings {
- bool cycle2cyclediffcsen;
- bool cycle2cyclesamecsen;
- bool we_extra_delay;
- bool oe_extra_delay;
- bool adv_extra_delay;
- bool cs_extra_delay;
- bool time_para_granularity;
-};
+/* IRQ numbers in GPMC IRQ domain for legacy boot use */
+#define GPMC_IRQ_FIFOEVENTENABLE 0
+#define GPMC_IRQ_COUNT_EVENT 1
-/*
- * Note that all values in this struct are in nanoseconds except sync_clk
- * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+/**
+ * struct gpmc_nand_ops - Interface between NAND and GPMC
+ * @nand_writebuffer_empty: get the NAND write buffer empty status.
*/
-struct gpmc_timings {
- /* Minimum clock period for synchronous mode (in picoseconds) */
- u32 sync_clk;
-
- /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
- u32 cs_on; /* Assertion time */
- u32 cs_rd_off; /* Read deassertion time */
- u32 cs_wr_off; /* Write deassertion time */
-
- /* ADV signal timings corresponding to GPMC_CONFIG3 */
- u32 adv_on; /* Assertion time */
- u32 adv_rd_off; /* Read deassertion time */
- u32 adv_wr_off; /* Write deassertion time */
- u32 adv_aad_mux_on; /* ADV assertion time for AAD */
- u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */
- u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */
-
- /* WE signals timings corresponding to GPMC_CONFIG4 */
- u32 we_on; /* WE assertion time */
- u32 we_off; /* WE deassertion time */
-
- /* OE signals timings corresponding to GPMC_CONFIG4 */
- u32 oe_on; /* OE assertion time */
- u32 oe_off; /* OE deassertion time */
- u32 oe_aad_mux_on; /* OE assertion time for AAD */
- u32 oe_aad_mux_off; /* OE deassertion time for AAD */
-
- /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
- u32 page_burst_access; /* Multiple access word delay */
- u32 access; /* Start-cycle to first data valid delay */
- u32 rd_cycle; /* Total read cycle time */
- u32 wr_cycle; /* Total write cycle time */
-
- u32 bus_turnaround;
- u32 cycle2cycle_delay;
-
- u32 wait_monitoring;
- u32 clk_activation;
-
- /* The following are only on OMAP3430 */
- u32 wr_access; /* WRACCESSTIME */
- u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
-
- struct gpmc_bool_timings bool_timings;
+struct gpmc_nand_ops {
+ bool (*nand_writebuffer_empty)(void);
};
-/* Device timings in picoseconds */
-struct gpmc_device_timings {
- u32 t_ceasu; /* address setup to CS valid */
- u32 t_avdasu; /* address setup to ADV valid */
- /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
- * of tusb using these timings even for sync whilst
- * ideally for adv_rd/(wr)_off it should have considered
- * t_avdh instead. This indirectly necessitates r/w
- * variations of t_avdp as it is possible to have one
- * sync & other async
- */
- u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
- u32 t_avdp_w;
- u32 t_aavdh; /* address hold time */
- u32 t_oeasu; /* address setup to OE valid */
- u32 t_aa; /* access time from ADV assertion */
- u32 t_iaa; /* initial access time */
- u32 t_oe; /* access time from OE assertion */
- u32 t_ce; /* access time from CS asertion */
- u32 t_rd_cycle; /* read cycle time */
- u32 t_cez_r; /* read CS deassertion to high Z */
- u32 t_cez_w; /* write CS deassertion to high Z */
- u32 t_oez; /* OE deassertion to high Z */
- u32 t_weasu; /* address setup to WE valid */
- u32 t_wpl; /* write assertion time */
- u32 t_wph; /* write deassertion time */
- u32 t_wr_cycle; /* write cycle time */
-
- u32 clk;
- u32 t_bacc; /* burst access valid clock to output delay */
- u32 t_ces; /* CS setup time to clk */
- u32 t_avds; /* ADV setup time to clk */
- u32 t_avdh; /* ADV hold time from clk */
- u32 t_ach; /* address hold time from clk */
- u32 t_rdyo; /* clk to ready valid */
-
- u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
- u32 t_ce_avd; /* CS on to ADV on delay */
-
- /* XXX: check the possibility of combining
- * cyc_aavhd_oe & cyc_aavdh_we
- */
- u8 cyc_aavdh_oe;/* read address hold time in cycles */
- u8 cyc_aavdh_we;/* write address hold time in cycles */
- u8 cyc_oe; /* access time from OE assertion in cycles */
- u8 cyc_wpl; /* write deassertion time in cycles */
- u32 cyc_iaa; /* initial access time in cycles */
-
- /* extra delays */
- bool ce_xdelay;
- bool avd_xdelay;
- bool oe_xdelay;
- bool we_xdelay;
-};
+struct gpmc_nand_regs;
-struct gpmc_settings {
- bool burst_wrap; /* enables wrap bursting */
- bool burst_read; /* enables read page/burst mode */
- bool burst_write; /* enables write page/burst mode */
- bool device_nand; /* device is NAND */
- bool sync_read; /* enables synchronous reads */
- bool sync_write; /* enables synchronous writes */
- bool wait_on_read; /* monitor wait on reads */
- bool wait_on_write; /* monitor wait on writes */
- u32 burst_len; /* page/burst length */
- u32 device_width; /* device bus width (8 or 16 bit) */
- u32 mux_add_data; /* multiplex address & data */
- u32 wait_pin; /* wait-pin to be used */
-};
+#if IS_ENABLED(CONFIG_OMAP_GPMC)
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+ int cs);
+#else
+static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+ int cs)
+{
+ return NULL;
+}
+#endif /* CONFIG_OMAP_GPMC */
+
+/*--------------------------------*/
+
+/* deprecated APIs */
+#if IS_ENABLED(CONFIG_OMAP_GPMC)
+void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
+#else
+static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+{
+}
+#endif /* CONFIG_OMAP_GPMC */
+/*--------------------------------*/
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
struct gpmc_settings *gpmc_s,
struct gpmc_device_timings *dev_t);
-struct gpmc_nand_regs;
struct device_node;
-extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
extern int gpmc_get_client_irq(unsigned irq_config);
extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
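
The omap-gpmc.h rework moves NAND consumers onto gpmc_omap_get_nand_ops(), which takes a gpmc_nand_regs pointer and a chip-select number and returns a small ops structure, while gpmc_update_nand_reg() is only kept as a deprecated path. A sketch of the consumer side; error handling and names are illustrative:

#include <linux/omap-gpmc.h>
#include <linux/errno.h>

static const struct gpmc_nand_ops *example_gpmc_ops;

static int example_nand_attach(struct gpmc_nand_regs *regs, int cs)
{
	example_gpmc_ops = gpmc_omap_get_nand_ops(regs, cs);
	if (!example_gpmc_ops)
		return -ENODEV;	/* GPMC not available or invalid chip select */

	return 0;
}

static bool example_writebuffer_empty(void)
{
	return example_gpmc_ops->nand_writebuffer_empty();
}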
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index 587bbdd31f5a..c726bd833761 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -21,8 +21,6 @@ struct mbox_client;
struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
const char *chan_name);
-void omap_mbox_save_ctx(struct mbox_chan *chan);
-void omap_mbox_restore_ctx(struct mbox_chan *chan);
void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 628a43242a34..83469522690a 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,28 +50,33 @@ enum oom_scan_t {
OOM_SCAN_SELECT, /* always select this thread first */
};
-/* Thread is the potential origin of an oom condition; kill first on oom */
-#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
-
extern struct mutex oom_lock;
static inline void set_current_oom_origin(void)
{
- current->signal->oom_flags |= OOM_FLAG_ORIGIN;
+ current->signal->oom_flag_origin = true;
}
static inline void clear_current_oom_origin(void)
{
- current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
+ current->signal->oom_flag_origin = false;
}
static inline bool oom_task_origin(const struct task_struct *p)
{
- return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
+ return p->signal->oom_flag_origin;
}
extern void mark_oom_victim(struct task_struct *tsk);
+#ifdef CONFIG_MMU
+extern void try_oom_reaper(struct task_struct *tsk);
+#else
+static inline void try_oom_reaper(struct task_struct *tsk)
+{
+}
+#endif
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
@@ -102,13 +107,24 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p);
static inline bool task_will_free_mem(struct task_struct *task)
{
+ struct signal_struct *sig = task->signal;
+
/*
* A coredumping process may sleep for an extended period in exit_mm(),
* so the oom killer cannot assume that the process will promptly exit
* and release memory.
*/
- return (task->flags & PF_EXITING) &&
- !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
+ if (sig->flags & SIGNAL_GROUP_COREDUMP)
+ return false;
+
+ if (!(task->flags & PF_EXITING))
+ return false;
+
+ /* Make sure that the whole thread group is going down */
+ if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT))
+ return false;
+
+ return true;
}
/* sysctls */
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 438694650471..113ee626a4dc 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst,
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_set_cpumasks(struct padata_instance *pinst,
- cpumask_var_t pcpumask,
- cpumask_var_t cbcpumask);
-extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
-extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f4ed4f1b0c77..e5a32445f930 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -371,10 +371,15 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM 2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+static __always_inline int PageAnonHead(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
static __always_inline int PageAnon(struct page *page)
{
page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return PageAnonHead(page);
}
#ifdef CONFIG_KSM
@@ -474,7 +479,7 @@ static inline void ClearPageCompound(struct page *page)
}
#endif
-#define PG_head_mask ((1L << PG_head))
+#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
@@ -517,6 +522,27 @@ static inline int PageTransCompound(struct page *page)
}
/*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to be called only while
+ * split_huge_pmd() cannot run from under us, like if protected by the
+ * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+ return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
+/*
* PageTransTail returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
@@ -559,6 +585,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
TESTPAGEFLAG_FALSE(DoubleMap)
TESTSETFLAG_FALSE(DoubleMap)
@@ -643,7 +670,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
}
#ifdef CONFIG_MMU
-#define __PG_MLOCKED (1 << PG_mlocked)
+#define __PG_MLOCKED (1UL << PG_mlocked)
#else
#define __PG_MLOCKED 0
#endif
@@ -653,11 +680,11 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
- (1 << PG_lru | 1 << PG_locked | \
- 1 << PG_private | 1 << PG_private_2 | \
- 1 << PG_writeback | 1 << PG_reserved | \
- 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- 1 << PG_unevictable | __PG_MLOCKED)
+ (1UL << PG_lru | 1UL << PG_locked | \
+ 1UL << PG_private | 1UL << PG_private_2 | \
+ 1UL << PG_writeback | 1UL << PG_reserved | \
+ 1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \
+ 1UL << PG_unevictable | __PG_MLOCKED)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -668,10 +695,10 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE \
- (1 << PG_private | 1 << PG_private_2)
+ (1UL << PG_private | 1UL << PG_private_2)
/**
* page_has_private - Determine if page has private stuff
* @page: The page to be checked
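
The repeated 1 -> 1UL conversions above matter once a flag bit can sit at or above bit 31. A standalone sketch (bit numbers chosen arbitrarily, not real page flags) showing the difference on a 64-bit build:

/* Why the page-flag masks switch from (1 << PG_x) to (1UL << PG_x): with a
 * plain int constant, shifting into bit 31 overflows the signed type and the
 * result typically sign-extends when widened to unsigned long on 64-bit,
 * while the 1UL form stays well-defined for any bit below 64. */
#include <stdio.h>

int main(void)
{
	unsigned long bad  = 1 << 31;	/* int shift overflows; typically
					 * sign-extends to 0xffffffff80000000 */
	unsigned long good = 1UL << 31;	/* 0x0000000080000000 as intended */
	unsigned long wide = 1UL << 33;	/* only expressible with unsigned long */

	printf("1   << 31 -> %#lx\n", bad);
	printf("1UL << 31 -> %#lx\n", good);
	printf("1UL << 33 -> %#lx\n", wide);
	return 0;
}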
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
static inline bool page_is_young(struct page *page)
{
- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline void set_page_young(struct page *page)
{
- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return test_and_clear_bit(PAGE_EXT_YOUNG,
- &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool page_is_idle(struct page *page)
{
- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void set_page_idle(struct page *page)
{
- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void clear_page_idle(struct page *page)
{
- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
#endif /* CONFIG_64BIT */
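
The page_idle accessors above all gain the same guard: look up the extension record, bail out if the lookup returns NULL, and only then touch the flag bits. A minimal sketch of that pattern with invented names (lookup_meta, struct meta) standing in for lookup_page_ext() and struct page_ext:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define META_FLAG_YOUNG (1UL << 0)

struct meta { unsigned long flags; };

/* May return NULL, e.g. when the metadata table was never allocated. */
static struct meta *lookup_meta(int key, struct meta *table, int n)
{
	return (table && key < n) ? &table[key] : NULL;
}

static bool meta_is_young(int key, struct meta *table, int n)
{
	struct meta *m = lookup_meta(key, table, n);

	if (!m)			/* treat "no metadata" as "not young" */
		return false;

	return m->flags & META_FLAG_YOUNG;
}

static void meta_set_young(int key, struct meta *table, int n)
{
	struct meta *m = lookup_meta(key, table, n);

	if (!m)			/* silently ignore writes without metadata */
		return;

	m->flags |= META_FLAG_YOUNG;
}

int main(void)
{
	struct meta table[4] = { { 0 } };

	meta_set_young(2, table, 4);
	printf("entry 2 young: %d\n", meta_is_young(2, table, 4));
	printf("missing entry: %d\n", meta_is_young(9, table, 4));
	return 0;
}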
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index e596d5d9540e..8b5e0a9f2431 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
static inline int page_ref_count(struct page *page)
{
- return atomic_read(&page->_count);
+ return atomic_read(&page->_refcount);
}
static inline int page_count(struct page *page)
{
- return atomic_read(&compound_head(page)->_count);
+ return atomic_read(&compound_head(page)->_refcount);
}
static inline void set_page_count(struct page *page, int v)
{
- atomic_set(&page->_count, v);
+ atomic_set(&page->_refcount, v);
if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
__page_ref_set(page, v);
}
@@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page)
static inline void page_ref_add(struct page *page, int nr)
{
- atomic_add(nr, &page->_count);
+ atomic_add(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, nr);
}
static inline void page_ref_sub(struct page *page, int nr)
{
- atomic_sub(nr, &page->_count);
+ atomic_sub(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, -nr);
}
static inline void page_ref_inc(struct page *page)
{
- atomic_inc(&page->_count);
+ atomic_inc(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, 1);
}
static inline void page_ref_dec(struct page *page)
{
- atomic_dec(&page->_count);
+ atomic_dec(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, -1);
}
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
- int ret = atomic_sub_and_test(nr, &page->_count);
+ int ret = atomic_sub_and_test(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
@@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
static inline int page_ref_dec_and_test(struct page *page)
{
- int ret = atomic_dec_and_test(&page->_count);
+ int ret = atomic_dec_and_test(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
@@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page)
static inline int page_ref_dec_return(struct page *page)
{
- int ret = atomic_dec_return(&page->_count);
+ int ret = atomic_dec_return(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
@@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page)
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
- int ret = atomic_add_unless(&page->_count, nr, u);
+ int ret = atomic_add_unless(&page->_refcount, nr, u);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
@@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u)
static inline int page_ref_freeze(struct page *page, int count)
{
- int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+ int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
__page_ref_freeze(page, count, ret);
@@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
- atomic_set(&page->_count, count);
+ atomic_set(&page->_refcount, count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
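
The _count -> _refcount rename above keeps the existing shape of the helpers: one wrapper per operation, each followed by an optional tracing call. A userspace sketch of that shape, using a plain function-pointer hook where the kernel uses tracepoints:

#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int refcount; };

/* Optional observer; NULL means tracing is disabled. */
static void (*ref_trace)(struct obj *o, int delta, int newval);

static void obj_ref_add(struct obj *o, int nr)
{
	int newval = atomic_fetch_add(&o->refcount, nr) + nr;

	if (ref_trace)
		ref_trace(o, nr, newval);
}

static int obj_ref_dec_and_test(struct obj *o)
{
	int newval = atomic_fetch_sub(&o->refcount, 1) - 1;

	if (ref_trace)
		ref_trace(o, -1, newval);
	return newval == 0;		/* caller frees when this returns 1 */
}

static void print_trace(struct obj *o, int delta, int newval)
{
	(void)o;
	printf("ref %+d -> %d\n", delta, newval);
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	ref_trace = print_trace;
	obj_ref_add(&o, 2);
	while (!obj_ref_dec_and_test(&o))
		;
	printf("object can be freed now\n");
	return 0;
}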
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7e1ab155c67c..97354102794d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -90,12 +90,12 @@ void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
- * If the page is free (_count == 0), then _count is untouched, and 0
- * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ * If the page is free (_refcount == 0), then _refcount is untouched, and 0
+ * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
@@ -111,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold);
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
- * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * Remove-side that cares about stability of _refcount (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
@@ -518,33 +518,27 @@ void page_endio(struct page *page, int rw, int err);
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
- * Fault a userspace page into pagetables. Return non-zero on a fault.
- *
- * This assumes that two userspace pages are always sufficient.
+ * Fault one or two userspace pages into pagetables.
+ * Return -EINVAL if more than two pages would be needed.
+ * Return non-zero on a fault.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
- int ret;
+ int span, ret;
if (unlikely(size == 0))
return 0;
+ span = offset_in_page(uaddr) + size;
+ if (span > 2 * PAGE_SIZE)
+ return -EINVAL;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
ret = __put_user(0, uaddr);
- if (ret == 0) {
- char __user *end = uaddr + size - 1;
-
- /*
- * If the page was already mapped, this will get a cache miss
- * for sure, so try to avoid doing it.
- */
- if (((unsigned long)uaddr & PAGE_MASK) !=
- ((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
- }
+ if (ret == 0 && span > PAGE_SIZE)
+ ret = __put_user(0, uaddr + size - 1);
return ret;
}
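
The new fault_in_pages_writeable() bounds the range by adding the offset inside the first page to the requested size and rejecting anything that would span more than two pages. A standalone sketch of just that arithmetic (PAGE_SIZE and offset_in_page() are redefined locally for the example):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

/* Returns 0 if the range fits in one page, 1 if it needs a second page,
 * and -1 if it would need more than two pages. */
static int pages_needed(unsigned long uaddr, unsigned long size)
{
	unsigned long span = offset_in_page(uaddr) + size;

	if (span > 2 * PAGE_SIZE)
		return -1;
	return span > PAGE_SIZE ? 1 : 0;
}

int main(void)
{
	printf("%d\n", pages_needed(0x1000, 100));	/* one page */
	printf("%d\n", pages_needed(0x1ff0, 100));	/* crosses into a second page */
	printf("%d\n", pages_needed(0x1ff0, 2 * 4096));	/* too big: rejected */
	return 0;
}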
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 932ec74909c6..b67e4df20801 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -166,8 +166,6 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
/* Flag for quirk use to store if quirk-specific ACS is enabled */
PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
- /* Flag to indicate the device uses dma_alias_devfn */
- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
/* Do not use bus resets for device */
@@ -273,7 +271,7 @@ struct pci_dev {
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
- u8 dma_alias_devfn;/* devfn of DMA alias, if any */
+ unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
struct pci_driver *driver; /* which driver has allocated this device */
u64 dma_mask; /* Mask of the bits of bus address this
@@ -1165,6 +1163,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
+int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+unsigned long pci_address_to_pio(phys_addr_t addr);
+phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
@@ -1481,6 +1482,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
+
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1664,7 +1667,7 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-void pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
@@ -1673,7 +1676,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
{
return -ENOTTY;
}
-static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1989,6 +1995,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
}
#endif
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
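
Replacing the single dma_alias_devfn byte with dma_alias_mask means a device can record several aliased devfns at once. A simplified userspace model of that bitmap (32 devices x 8 functions = 256 possible devfns); this is an illustration, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEVFNS        256
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct fake_pci_dev {
	unsigned long dma_alias_mask[DEVFNS / BITS_PER_LONG];
};

static void add_dma_alias(struct fake_pci_dev *dev, unsigned int devfn)
{
	dev->dma_alias_mask[devfn / BITS_PER_LONG] |=
		1UL << (devfn % BITS_PER_LONG);
}

static bool has_dma_alias(const struct fake_pci_dev *dev, unsigned int devfn)
{
	return dev->dma_alias_mask[devfn / BITS_PER_LONG] &
		(1UL << (devfn % BITS_PER_LONG));
}

int main(void)
{
	struct fake_pci_dev dev;

	memset(&dev, 0, sizeof(dev));
	add_dma_alias(&dev, 0x08);	/* device 1, function 0 */
	add_dma_alias(&dev, 0x0a);	/* device 1, function 2 */

	printf("devfn 0x08 aliased: %d\n", has_dma_alias(&dev, 0x08));
	printf("devfn 0x09 aliased: %d\n", has_dma_alias(&dev, 0x09));
	return 0;
}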
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 247da8c95860..c58752fe16c4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2604,6 +2604,24 @@
#define PCI_DEVICE_ID_INTEL_82441 0x1237
#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */
+#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a
+#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548
+#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 4f1089f2cc98..afcd130ab3a9 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -21,6 +21,8 @@
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
+#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
+#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
struct pcie_device {
int irq; /* Service IRQ/MSI/MSI-X Vector */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4bc6dafb703e..56939d3f6e53 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -129,7 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
__alignof__(type))
-/* To avoid include hell, as printk can not declare this, we declare it here */
-DECLARE_PER_CPU(printk_func_t, printk_func);
-
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 4196c90a3c88..d28ac05c7f92 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -105,6 +105,8 @@ struct arm_pmu {
struct mutex reserve_mutex;
u64 max_period;
bool secure_access; /* 32-bit ARM only */
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+ DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct notifier_block hotplug_nb;
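
DECLARE_BITMAP() above sizes an array of unsigned long to hold the requested number of bits. A small sketch that mirrors the macro locally to show roughly what the pmceid_bitmap declaration expands to; the event number used is arbitrary:

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define MAX_COMMON_EVENTS 0x40

int main(void)
{
	DECLARE_BITMAP(pmceid, MAX_COMMON_EVENTS) = { 0 };

	/* Mark common event number 3 as supported. */
	pmceid[3 / BITS_PER_LONG] |= 1UL << (3 % BITS_PER_LONG);

	printf("words in bitmap: %zu\n", sizeof(pmceid) / sizeof(pmceid[0]));
	printf("event 3 present: %lu\n",
	       (pmceid[3 / BITS_PER_LONG] >> (3 % BITS_PER_LONG)) & 1UL);
	return 0;
}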
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f291275ffd71..1a827cecd62f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -58,7 +58,15 @@ struct perf_guest_info_callbacks {
struct perf_callchain_entry {
__u64 nr;
- __u64 ip[PERF_MAX_STACK_DEPTH];
+ __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
+};
+
+struct perf_callchain_entry_ctx {
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
};
struct perf_raw_record {
@@ -151,6 +159,15 @@ struct hw_perf_event {
*/
struct task_struct *target;
+ /*
+ * PMU would store hardware filter configuration
+ * here.
+ */
+ void *addr_filters;
+
+ /* Last sync'ed generation of filters */
+ unsigned long addr_filters_gen;
+
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
@@ -216,6 +233,7 @@ struct perf_event;
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
#define PERF_PMU_CAP_EXCLUSIVE 0x10
#define PERF_PMU_CAP_ITRACE 0x20
+#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
/**
* struct pmu - generic performance monitoring unit
@@ -240,6 +258,9 @@ struct pmu {
int task_ctx_nr;
int hrtimer_interval_ms;
+ /* number of address filters this PMU can do */
+ unsigned int nr_addr_filters;
+
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
@@ -393,12 +414,71 @@ struct pmu {
void (*free_aux) (void *aux); /* optional */
/*
+ * Validate address range filters: make sure the HW supports the
+ * requested configuration and number of filters; return 0 if the
+ * supplied filters are valid, -errno otherwise.
+ *
+ * Runs in the context of the ioctl()ing process and is not serialized
+ * with the rest of the PMU callbacks.
+ */
+ int (*addr_filters_validate) (struct list_head *filters);
+ /* optional */
+
+ /*
+ * Synchronize address range filter configuration:
+ * translate hw-agnostic filters into hardware configuration in
+ * event::hw::addr_filters.
+ *
+ * Runs as a part of filter sync sequence that is done in ->start()
+ * callback by calling perf_event_addr_filters_sync().
+ *
+ * May (and should) traverse event::addr_filters::list, for which its
+ * caller provides necessary serialization.
+ */
+ void (*addr_filters_sync) (struct perf_event *event);
+ /* optional */
+
+ /*
* Filter events for PMU-specific reasons.
*/
int (*filter_match) (struct perf_event *event); /* optional */
};
/**
+ * struct perf_addr_filter - address range filter definition
+ * @entry: event's filter list linkage
+ * @inode: object file's inode for file-based filters
+ * @offset: filter range offset
+ * @size: filter range size
+ * @range: 1: range, 0: address
+ * @filter: 1: filter/start, 0: stop
+ *
+ * This is a hardware-agnostic filter configuration as specified by the user.
+ */
+struct perf_addr_filter {
+ struct list_head entry;
+ struct inode *inode;
+ unsigned long offset;
+ unsigned long size;
+ unsigned int range : 1,
+ filter : 1;
+};
+
+/**
+ * struct perf_addr_filters_head - container for address range filters
+ * @list: list of filters for this event
+ * @lock: spinlock that serializes accesses to the @list and event's
+ * (and its children's) filter generations.
+ *
+ * A child event will use parent's @list (and therefore @lock), so they are
+ * bundled together; see perf_event_addr_filters().
+ */
+struct perf_addr_filters_head {
+ struct list_head list;
+ raw_spinlock_t lock;
+};
+
+/**
* enum perf_event_active_state - the states of an event
*/
enum perf_event_active_state {
@@ -566,6 +646,12 @@ struct perf_event {
atomic_t event_limit;
+ /* address range filters */
+ struct perf_addr_filters_head addr_filters;
+ /* vma address array for file-based filters */
+ unsigned long *addr_filters_offs;
+ unsigned long addr_filters_gen;
+
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
@@ -834,9 +920,25 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output_forward(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+extern void perf_event_output_backward(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs);
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+static inline bool
+is_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(event->overflow_handler == perf_event_output_forward))
+ return true;
+ if (unlikely(event->overflow_handler == perf_event_output_backward))
+ return true;
+ return false;
+}
extern void
perf_event_header__init_id(struct perf_event_header *header,
@@ -882,8 +984,6 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
- memset(regs, 0, sizeof(*regs));
-
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
@@ -969,18 +1069,36 @@ extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
- bool crosstask, bool add_mark);
+ u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
+
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+ if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+ struct perf_callchain_entry *entry = ctx->entry;
+ entry->ip[entry->nr++] = ip;
+ ++ctx->contexts;
+ return 0;
+ } else {
+ ctx->contexts_maxed = true;
+ return -1; /* no more room, stop walking the stack */
+ }
+}
+
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
- if (entry->nr < PERF_MAX_STACK_DEPTH) {
+ if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+ struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
+ ++ctx->nr;
return 0;
} else {
return -1; /* no more room, stop walking the stack */
@@ -1001,6 +1119,8 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
static inline bool perf_paranoid_tracepoint_raw(void)
{
@@ -1018,7 +1138,7 @@ static inline bool perf_paranoid_kernel(void)
}
extern void perf_event_init(void);
-extern void perf_tp_event(u64 addr, u64 count, void *record,
+extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
struct task_struct *task);
@@ -1045,8 +1165,41 @@ static inline bool has_aux(struct perf_event *event)
return event->pmu->setup_aux;
}
+static inline bool is_write_backward(struct perf_event *event)
+{
+ return !!event->attr.write_backward;
+}
+
+static inline bool has_addr_filter(struct perf_event *event)
+{
+ return event->pmu->nr_addr_filters;
+}
+
+/*
+ * An inherited event uses parent's filters
+ */
+static inline struct perf_addr_filters_head *
+perf_event_addr_filters(struct perf_event *event)
+{
+ struct perf_addr_filters_head *ifh = &event->addr_filters;
+
+ if (event->parent)
+ ifh = &event->parent->addr_filters;
+
+ return ifh;
+}
+
+extern void perf_event_addr_filters_sync(struct perf_event *event);
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
+extern int perf_output_begin_forward(struct perf_output_handle *handle,
+ struct perf_event *event,
+ unsigned int size);
+extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_event *event,
+ unsigned int size);
+
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
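
Two of the perf changes above fit together: perf_callchain_entry now ends in a flexible array sized at runtime, and stores go through a small context that enforces a max_stack limit. A standalone sketch of that combination, with local stand-in names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct callchain_entry {
	uint64_t nr;
	uint64_t ip[];			/* flexible array, sized at alloc time */
};

struct callchain_ctx {
	struct callchain_entry *entry;
	uint32_t max_stack;		/* runtime limit, not a compile-time one */
};

static struct callchain_ctx ctx_alloc(uint32_t max_stack)
{
	struct callchain_entry *e =
		calloc(1, sizeof(*e) + max_stack * sizeof(e->ip[0]));

	return (struct callchain_ctx){ .entry = e, .max_stack = max_stack };
}

/* Returns 0 on success, -1 once the configured depth is exhausted. */
static int callchain_store(struct callchain_ctx *ctx, uint64_t ip)
{
	struct callchain_entry *e = ctx->entry;

	if (e->nr >= ctx->max_stack)
		return -1;		/* no more room, stop walking */

	e->ip[e->nr++] = ip;
	return 0;
}

int main(void)
{
	struct callchain_ctx ctx = ctx_alloc(4);

	for (uint64_t ip = 0x1000; callchain_store(&ctx, ip) == 0; ip += 0x10)
		;
	printf("stored %llu frames\n", (unsigned long long)ctx.entry->nr);
	free(ctx.entry);
	return 0;
}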
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2abd7918f64f..2d24b283aa2d 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
@@ -825,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd);
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cf05e341cff..a810f2a18842 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -77,6 +77,7 @@ struct phy {
*/
struct phy_provider {
struct device *dev;
+ struct device_node *children;
struct module *owner;
struct list_head list;
struct phy * (*of_xlate)(struct device *dev,
@@ -93,10 +94,16 @@ struct phy_lookup {
#define to_phy(a) (container_of((a), struct phy, dev))
#define of_phy_provider_register(dev, xlate) \
- __of_phy_provider_register((dev), THIS_MODULE, (xlate))
+ __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
#define devm_of_phy_provider_register(dev, xlate) \
- __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+ __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
+
+#define of_phy_provider_register_full(dev, children, xlate) \
+ __of_phy_provider_register(dev, children, THIS_MODULE, xlate)
+
+#define devm_of_phy_provider_register_full(dev, children, xlate) \
+ __devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate)
static inline void phy_set_drvdata(struct phy *phy, void *data)
{
@@ -147,11 +154,13 @@ struct phy *devm_phy_create(struct device *dev, struct device_node *node,
void phy_destroy(struct phy *phy);
void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
@@ -312,15 +321,17 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
}
static inline struct phy_provider *__of_phy_provider_register(
- struct device *dev, struct module *owner, struct phy * (*of_xlate)(
- struct device *dev, struct of_phandle_args *args))
+ struct device *dev, struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
static inline struct phy_provider *__devm_of_phy_provider_register(struct device
- *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ *dev, struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
new file mode 100644
index 000000000000..8e1a57a78d9f
--- /dev/null
+++ b/include/linux/phy/tegra/xusb.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef PHY_TEGRA_XUSB_H
+#define PHY_TEGRA_XUSB_H
+
+struct tegra_xusb_padctl;
+struct device;
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool idle);
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool enable);
+
+#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 9ba59fcba549..a42e57da270d 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -144,6 +144,12 @@ struct pinctrl_desc {
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data);
+extern void devm_pinctrl_unregister(struct device *dev,
+ struct pinctrl_dev *pctldev);
+
extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index dc9a13e5acda..be830b141d83 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -26,7 +26,7 @@
*
* An example in pseudo code for a setup() callback:
*
- * void get_mac_addr(struct mvmem_device *nvmem, void *context)
+ * void get_mac_addr(struct nvmem_device *nvmem, void *context)
* {
* u8 *mac_addr = ethernet_pdata->mac_addr;
* off_t offset = context;
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 03b6095d3b18..d15d8ba8cc24 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -21,15 +21,15 @@
* @dma_dev: required DMA master device
* @src_id: src request line
* @dst_id: dst request line
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
+ * @m_master: memory master for transfers on allocated channel
+ * @p_master: peripheral master for transfers on allocated channel
*/
struct dw_dma_slave {
struct device *dma_dev;
u8 src_id;
u8 dst_id;
- u8 src_master;
- u8 dst_master;
+ u8 m_master;
+ u8 p_master;
};
/**
@@ -43,7 +43,7 @@ struct dw_dma_slave {
* @block_size: Maximum block size supported by the controller
* @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
- * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ * (in bytes, power of 2)
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
@@ -55,7 +55,7 @@ struct dw_dma_platform_data {
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
unsigned char chan_priority;
- unsigned short block_size;
+ unsigned int block_size;
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
};
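
The data_width comment change above switches the field from an exponent encoding (0 = 8 bits ... 5 = 256 bits) to a width in bytes. A small sketch of the conversion between the two forms, for illustration only:

#include <stdio.h>

static unsigned int encoded_to_bytes(unsigned int encoded)
{
	return 1U << encoded;		/* 0 -> 1 byte (8 bits), 5 -> 32 bytes */
}

static unsigned int bytes_to_encoded(unsigned int bytes)
{
	unsigned int encoded = 0;

	while ((1U << encoded) < bytes)
		encoded++;
	return encoded;
}

int main(void)
{
	for (unsigned int enc = 0; enc <= 5; enc++)
		printf("encoded %u -> %u bytes -> encoded %u\n",
		       enc, encoded_to_bytes(enc),
		       bytes_to_encoded(encoded_to_bytes(enc)));
	return 0;
}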
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 28702c849af1..2dc7f4a8ab09 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -15,8 +15,7 @@
#define GPIO_DW_APB_H
struct dwapb_port_property {
- struct device_node *node;
- const char *name;
+ struct fwnode_handle *fwnode;
unsigned int idx;
unsigned int ngpio;
unsigned int gpio_base;
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
new file mode 100644
index 000000000000..67ccdb0e1606
--- /dev/null
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -0,0 +1,172 @@
+/*
+ * OMAP GPMC Platform data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ * Roger Quadros <rogerq@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _GPMC_OMAP_H_
+#define _GPMC_OMAP_H_
+
+/* Maximum Number of Chip Selects */
+#define GPMC_CS_NUM 8
+
+/* bool type time settings */
+struct gpmc_bool_timings {
+ bool cycle2cyclediffcsen;
+ bool cycle2cyclesamecsen;
+ bool we_extra_delay;
+ bool oe_extra_delay;
+ bool adv_extra_delay;
+ bool cs_extra_delay;
+ bool time_para_granularity;
+};
+
+/*
+ * Note that all values in this struct are in nanoseconds except sync_clk
+ * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+ */
+struct gpmc_timings {
+ /* Minimum clock period for synchronous mode (in picoseconds) */
+ u32 sync_clk;
+
+ /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
+ u32 cs_on; /* Assertion time */
+ u32 cs_rd_off; /* Read deassertion time */
+ u32 cs_wr_off; /* Write deassertion time */
+
+ /* ADV signal timings corresponding to GPMC_CONFIG3 */
+ u32 adv_on; /* Assertion time */
+ u32 adv_rd_off; /* Read deassertion time */
+ u32 adv_wr_off; /* Write deassertion time */
+ u32 adv_aad_mux_on; /* ADV assertion time for AAD */
+ u32 adv_aad_mux_rd_off; /* ADV read deassertion time for AAD */
+ u32 adv_aad_mux_wr_off; /* ADV write deassertion time for AAD */
+
+ /* WE signals timings corresponding to GPMC_CONFIG4 */
+ u32 we_on; /* WE assertion time */
+ u32 we_off; /* WE deassertion time */
+
+ /* OE signals timings corresponding to GPMC_CONFIG4 */
+ u32 oe_on; /* OE assertion time */
+ u32 oe_off; /* OE deassertion time */
+ u32 oe_aad_mux_on; /* OE assertion time for AAD */
+ u32 oe_aad_mux_off; /* OE deassertion time for AAD */
+
+ /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
+ u32 page_burst_access; /* Multiple access word delay */
+ u32 access; /* Start-cycle to first data valid delay */
+ u32 rd_cycle; /* Total read cycle time */
+ u32 wr_cycle; /* Total write cycle time */
+
+ u32 bus_turnaround;
+ u32 cycle2cycle_delay;
+
+ u32 wait_monitoring;
+ u32 clk_activation;
+
+ /* The following are only on OMAP3430 */
+ u32 wr_access; /* WRACCESSTIME */
+ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+
+ struct gpmc_bool_timings bool_timings;
+};
+
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+ u32 t_ceasu; /* address setup to CS valid */
+ u32 t_avdasu; /* address setup to ADV valid */
+ /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
+ * of tusb using these timings even for sync whilst
+ * ideally for adv_rd/(wr)_off it should have considered
+ * t_avdh instead. This indirectly necessitates r/w
+ * variations of t_avdp as it is possible to have one
+ * sync & other async
+ */
+ u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
+ u32 t_avdp_w;
+ u32 t_aavdh; /* address hold time */
+ u32 t_oeasu; /* address setup to OE valid */
+ u32 t_aa; /* access time from ADV assertion */
+ u32 t_iaa; /* initial access time */
+ u32 t_oe; /* access time from OE assertion */
+ u32 t_ce; /* access time from CS assertion */
+ u32 t_rd_cycle; /* read cycle time */
+ u32 t_cez_r; /* read CS deassertion to high Z */
+ u32 t_cez_w; /* write CS deassertion to high Z */
+ u32 t_oez; /* OE deassertion to high Z */
+ u32 t_weasu; /* address setup to WE valid */
+ u32 t_wpl; /* write assertion time */
+ u32 t_wph; /* write deassertion time */
+ u32 t_wr_cycle; /* write cycle time */
+
+ u32 clk;
+ u32 t_bacc; /* burst access valid clock to output delay */
+ u32 t_ces; /* CS setup time to clk */
+ u32 t_avds; /* ADV setup time to clk */
+ u32 t_avdh; /* ADV hold time from clk */
+ u32 t_ach; /* address hold time from clk */
+ u32 t_rdyo; /* clk to ready valid */
+
+ u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
+ u32 t_ce_avd; /* CS on to ADV on delay */
+
+ /* XXX: check the possibility of combining
+ * cyc_aavhd_oe & cyc_aavdh_we
+ */
+ u8 cyc_aavdh_oe;/* read address hold time in cycles */
+ u8 cyc_aavdh_we;/* write address hold time in cycles */
+ u8 cyc_oe; /* access time from OE assertion in cycles */
+ u8 cyc_wpl; /* write deassertion time in cycles */
+ u32 cyc_iaa; /* initial access time in cycles */
+
+ /* extra delays */
+ bool ce_xdelay;
+ bool avd_xdelay;
+ bool oe_xdelay;
+ bool we_xdelay;
+};
+
+#define GPMC_BURST_4 4 /* 4 word burst */
+#define GPMC_BURST_8 8 /* 8 word burst */
+#define GPMC_BURST_16 16 /* 16 word burst */
+#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
+#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
+#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
+#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+
+struct gpmc_settings {
+ bool burst_wrap; /* enables wrap bursting */
+ bool burst_read; /* enables read page/burst mode */
+ bool burst_write; /* enables write page/burst mode */
+ bool device_nand; /* device is NAND */
+ bool sync_read; /* enables synchronous reads */
+ bool sync_write; /* enables synchronous writes */
+ bool wait_on_read; /* monitor wait on reads */
+ bool wait_on_write; /* monitor wait on writes */
+ u32 burst_len; /* page/burst length */
+ u32 device_width; /* device bus width (8 or 16 bit) */
+ u32 mux_add_data; /* multiplex address & data */
+ u32 wait_pin; /* wait-pin to be used */
+};
+
+/* Data for each chip select */
+struct gpmc_omap_cs_data {
+ bool valid; /* data is valid */
+ bool is_nand; /* device within this CS is NAND */
+ struct gpmc_settings *settings;
+ struct gpmc_device_timings *device_timings;
+ struct gpmc_timings *gpmc_timings;
+ struct platform_device *pdev; /* device within this CS region */
+ unsigned int pdata_size;
+};
+
+struct gpmc_omap_platform_data {
+ struct gpmc_omap_cs_data cs[GPMC_CS_NUM];
+};
+
+#endif /* _GPMC_OMAP_H */
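
The comment in the new header notes that the timing fields are in nanoseconds (sync_clk in picoseconds) while the registers count gpmc_fck cycles, so a consumer has to round each value up to whole clock cycles. A standalone sketch of that conversion; the 100 MHz functional clock and the timing values are picked only for the example, and the real conversion lives in the GPMC driver, not in this header:

#include <stdio.h>

/* Round a time in nanoseconds up to cycles of a clock given in kHz. */
static unsigned int ns_to_cycles(unsigned int time_ns, unsigned int clk_khz)
{
	unsigned long long ticks =
		((unsigned long long)time_ns * clk_khz + 999999ULL) / 1000000ULL;

	return (unsigned int)ticks;
}

int main(void)
{
	unsigned int clk_khz = 100000;	/* 100 MHz GPMC functional clock */

	printf("cs_on   36 ns -> %u cycles\n", ns_to_cycles(36, clk_khz));
	printf("access 120 ns -> %u cycles\n", ns_to_cycles(120, clk_khz));
	return 0;
}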
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index ad3aa7b95f35..554b59801aa8 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -16,13 +16,16 @@
/**
* struct inv_mpu6050_platform_data - Platform data for the mpu driver
- * @orientation: Orientation matrix of the chip
+ * @orientation: Orientation matrix of the chip (deprecated in favor of
+ * mounting matrix retrieved from device-tree)
*
* Contains platform specific information on how to configure the MPU6050 to
* work on this platform. The orientation matrices are 3x3 rotation matrices
* that are applied to the data to rotate from the mounting orientation to the
* platform orientation. The values must be one of 0, 1, or -1 and each row and
* column should have exactly 1 non-zero value.
+ *
+ * Deprecated in favor of mounting matrix retrieved from device-tree.
*/
struct inv_mpu6050_platform_data {
__s8 orientation[9];
diff --git a/include/linux/platform_data/mailbox-omap.h b/include/linux/platform_data/mailbox-omap.h
deleted file mode 100644
index 4631dbb4255e..000000000000
--- a/include/linux/platform_data/mailbox-omap.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * mailbox-omap.h
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _PLAT_MAILBOX_H
-#define _PLAT_MAILBOX_H
-
-/* Interrupt register configuration types */
-#define MBOX_INTR_CFG_TYPE1 (0)
-#define MBOX_INTR_CFG_TYPE2 (1)
-
-/**
- * struct omap_mbox_dev_info - OMAP mailbox device attribute info
- * @name: name of the mailbox device
- * @tx_id: mailbox queue id used for transmitting messages
- * @rx_id: mailbox queue id on which messages are received
- * @irq_id: irq identifier number to use from the hwmod data
- * @usr_id: mailbox user id for identifying the interrupt into
- * the MPU interrupt controller.
- */
-struct omap_mbox_dev_info {
- const char *name;
- u32 tx_id;
- u32 rx_id;
- u32 irq_id;
- u32 usr_id;
-};
-
-/**
- * struct omap_mbox_pdata - OMAP mailbox platform data
- * @intr_type: type of interrupt configuration registers used
- while programming mailbox queue interrupts
- * @num_users: number of users (processor devices) that the mailbox
- * h/w block can interrupt
- * @num_fifos: number of h/w fifos within the mailbox h/w block
- * @info_cnt: number of mailbox devices for the platform
- * @info: array of mailbox device attributes
- */
-struct omap_mbox_pdata {
- u32 intr_type;
- u32 num_users;
- u32 num_fifos;
- u32 info_cnt;
- struct omap_mbox_dev_info *info;
-};
-
-#endif /* _PLAT_MAILBOX_H */
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 104aa892f31b..3038120ca46e 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -5,6 +5,7 @@ struct lirc_rx51_platform_data {
int pwm_timer;
int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
+ struct pwm_omap_dmtimer_pdata *dmtimer;
};
#endif
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 090bbab0130a..17d57a18bac5 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -45,7 +45,6 @@ enum omap_ecc {
};
struct gpmc_nand_regs {
- void __iomem *gpmc_status;
void __iomem *gpmc_nand_command;
void __iomem *gpmc_nand_address;
void __iomem *gpmc_nand_data;
@@ -64,21 +63,24 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
+ /* Deprecated. Do not use */
+ void __iomem *gpmc_status;
};
struct omap_nand_platform_data {
int cs;
struct mtd_partition *parts;
int nr_parts;
- bool dev_ready;
bool flash_bbt;
enum nand_io xfer_type;
int devsize;
enum omap_ecc ecc_opt;
- struct gpmc_nand_regs reg;
- /* for passing the partitions */
- struct device_node *of_node;
struct device_node *elm_of_node;
+
+ /* deprecated */
+ struct gpmc_nand_regs reg;
+ struct device_node *of_node;
+ bool dev_ready;
};
#endif
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
index 59384217208f..e7d521e48855 100644
--- a/include/linux/platform_data/pwm_omap_dmtimer.h
+++ b/include/linux/platform_data/pwm_omap_dmtimer.h
@@ -35,6 +35,16 @@
#ifndef __PWM_OMAP_DMTIMER_PDATA_H
#define __PWM_OMAP_DMTIMER_PDATA_H
+/* clock sources */
+#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00
+#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01
+#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02
+
+/* timer interrupt enable bits */
+#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2)
+#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1)
+#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0)
+
/* trigger types */
#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00
#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01
@@ -45,15 +55,23 @@ typedef struct omap_dm_timer pwm_omap_dmtimer;
struct pwm_omap_dmtimer_pdata {
pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
+ pwm_omap_dmtimer *(*request_specific)(int timer_id);
+ pwm_omap_dmtimer *(*request)(void);
+
int (*free)(pwm_omap_dmtimer *timer);
void (*enable)(pwm_omap_dmtimer *timer);
void (*disable)(pwm_omap_dmtimer *timer);
+ int (*get_irq)(pwm_omap_dmtimer *timer);
+ int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value);
+ int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask);
+
struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
int (*start)(pwm_omap_dmtimer *timer);
int (*stop)(pwm_omap_dmtimer *timer);
+ int (*set_source)(pwm_omap_dmtimer *timer, int source);
int (*set_load)(pwm_omap_dmtimer *timer, int autoreload,
unsigned int value);
@@ -63,7 +81,10 @@ struct pwm_omap_dmtimer_pdata {
int toggle, int trigger);
int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
+ unsigned int (*read_counter)(pwm_omap_dmtimer *timer);
int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
+ unsigned int (*read_status)(pwm_omap_dmtimer *timer);
+ int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value);
};
#endif /* __PWM_OMAP_DMTIMER_PDATA_H */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 753839187ba0..79b0e4cdb814 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -16,9 +16,11 @@
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * @open_drain: set the interrupt line to be open drain if possible.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
+ bool open_drain;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 03b755521fd9..98c2a7c7108e 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -18,7 +18,7 @@
#define PLATFORM_DEVID_AUTO (-2)
struct mfd_cell;
-struct property_set;
+struct property_entry;
struct platform_device {
const char *name;
@@ -73,7 +73,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
- const struct property_set *pset;
+ struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -172,7 +172,7 @@ extern int platform_device_add_resources(struct platform_device *pdev,
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_set *pset);
+ struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6a5d654f4447..06eb353182ab 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -563,7 +563,6 @@ struct dev_pm_info {
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
- bool ignore_children:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
spinlock_t lock;
@@ -591,6 +590,7 @@ struct dev_pm_info {
unsigned int deferred_resume:1;
unsigned int run_wake:1;
unsigned int runtime_auto:1;
+ bool ignore_children:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 49cd8890b873..39285c7bd3f5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -28,14 +28,12 @@ enum gpd_status {
struct dev_power_governor {
bool (*power_down_ok)(struct dev_pm_domain *domain);
- bool (*stop_ok)(struct device *dev);
+ bool (*suspend_ok)(struct device *dev);
};
struct gpd_dev_ops {
int (*start)(struct device *dev);
int (*stop)(struct device *dev);
- int (*save_state)(struct device *dev);
- int (*restore_state)(struct device *dev);
bool (*active_wakeup)(struct device *dev);
};
@@ -94,7 +92,7 @@ struct gpd_timing_data {
s64 resume_latency_ns;
s64 effective_constraint_ns;
bool constraint_changed;
- bool cached_stop_ok;
+ bool cached_suspend_ok;
};
struct pm_domain_data {
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cccaf4a29e9f..bca26157f5b6 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev);
int dev_pm_opp_set_regulator(struct device *dev, const char *name);
void dev_pm_opp_put_regulator(struct device *dev);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
struct device *dev)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline int dev_pm_opp_set_supported_hw(struct device *dev,
const u32 *versions,
unsigned int count)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_regulator(struct device *dev) {}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
+ return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
+{
+ return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+{
return -EINVAL;
}
+static inline void dev_pm_opp_remove_table(struct device *dev)
+{
+}
+
+static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
void dev_pm_opp_of_remove_table(struct device *dev);
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
-static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
- return -ENOSYS;
-}
-
-static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
+ return -ENOTSUPP;
}
-static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
- return -ENOSYS;
}
-static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
#endif
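
The OPP stubs above consistently switch their return value to -ENOTSUPP, so a caller can tell "feature compiled out" apart from "bad argument" (-EINVAL). A minimal sketch of that convention; FEATURE_X and do_thing() are invented for the example, and ENOTSUPP is defined locally because it is a kernel-internal code:

#include <errno.h>
#include <stdio.h>

#ifndef ENOTSUPP
#define ENOTSUPP 524			/* kernel-internal "not supported" code */
#endif

/* #define FEATURE_X 1 */		/* define to build the real version */

#ifdef FEATURE_X
static int do_thing(int arg)
{
	if (arg < 0)
		return -EINVAL;		/* bad argument from the caller */
	return 0;
}
#else
static inline int do_thing(int arg)
{
	(void)arg;
	return -ENOTSUPP;		/* feature not compiled in */
}
#endif

int main(void)
{
	int ret = do_thing(1);

	printf("do_thing() -> %d (%s)\n", ret,
	       ret == -ENOTSUPP ? "not supported" : "ok or other error");
	return 0;
}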
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7af093d6a4dd..2e14d2667b6c 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -56,6 +56,11 @@ extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+{
+ dev->power.ignore_children = enable;
+}
+
static inline bool pm_children_suspended(struct device *dev)
{
return dev->power.ignore_children
@@ -156,6 +161,7 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 5df733b8f704..2588ca6a9028 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -337,9 +337,11 @@ extern struct mutex pnp_res_mutex;
#ifdef CONFIG_PNPBIOS
extern struct pnp_protocol pnpbios_protocol;
+extern bool arch_pnpbios_disabled(void);
#define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol))
#else
#define pnp_device_is_pnpbios(dev) 0
+#define arch_pnpbios_disabled() false
#endif
#ifdef CONFIG_PNPACPI
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 9fb4f40d9a26..37b057b63b46 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
-extern u64 select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec64 *tv);
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
@@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
+extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- struct timespec *end_time);
+ struct timespec64 *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timespec *end_time);
+ fd_set __user *exp, struct timespec64 *end_time);
-extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
+extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
+ long nsec);
#endif /* _LINUX_POLL_H */
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 3e96a6a76103..5b5a80cc5926 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -99,7 +99,6 @@ extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
extern int simple_set_acl(struct inode *, struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
-struct posix_acl **acl_by_type(struct inode *inode, int type);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 9ccbdf2c1453..f4da695fd615 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -122,7 +122,19 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
+#ifdef CONFIG_PRINTK_NMI
+extern void printk_nmi_init(void);
+extern void printk_nmi_enter(void);
+extern void printk_nmi_exit(void);
+extern void printk_nmi_flush(void);
+extern void printk_nmi_flush_on_panic(void);
+#else
+static inline void printk_nmi_init(void) { }
+static inline void printk_nmi_enter(void) { }
+static inline void printk_nmi_exit(void) { }
+static inline void printk_nmi_flush(void) { }
+static inline void printk_nmi_flush_on_panic(void) { }
+#endif /* CONFIG_PRINTK_NMI */
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
diff --git a/include/linux/property.h b/include/linux/property.h
index b51fcd36d892..ecab11e40794 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -238,18 +238,9 @@ struct property_entry {
.name = _name_, \
}
-/**
- * struct property_set - Collection of "built-in" device properties.
- * @fwnode: Handle to be pointed to by the fwnode field of struct device.
- * @properties: Array of properties terminated with a null entry.
- */
-struct property_set {
- struct fwnode_handle fwnode;
- struct property_entry *properties;
-};
-
-int device_add_property_set(struct device *dev, const struct property_set *pset);
-void device_remove_property_set(struct device *dev);
+int device_add_properties(struct device *dev,
+ struct property_entry *properties);
+void device_remove_properties(struct device *dev);
bool device_dma_supported(struct device *dev);
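
A short sketch (hypothetical board-setup code) of the renamed API: a null-terminated array of property_entry is now passed directly instead of being wrapped in a property_set. The PROPERTY_ENTRY_U32()/PROPERTY_ENTRY_STRING() helpers are the existing macros from this header; the names and values below are made up:

	static struct property_entry foo_properties[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 400000),
		PROPERTY_ENTRY_STRING("label", "foo-controller"),
		{ }	/* terminating empty entry */
	};

	static int foo_setup(struct device *dev)
	{
		/* attached until device_remove_properties() or device removal */
		return device_add_properties(dev, foo_properties);
	}
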
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
deleted file mode 100644
index 21221338ad18..000000000000
--- a/include/linux/proportions.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * FLoating proportions
- *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * This file contains the public data structure and API definitions.
- */
-
-#ifndef _LINUX_PROPORTIONS_H
-#define _LINUX_PROPORTIONS_H
-
-#include <linux/percpu_counter.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-
-struct prop_global {
- /*
- * The period over which we differentiate
- *
- * period = 2^shift
- */
- int shift;
- /*
- * The total event counter aka 'time'.
- *
- * Treated as an unsigned long; the lower 'shift - 1' bits are the
- * counter bits, the remaining upper bits the period counter.
- */
- struct percpu_counter events;
-};
-
-/*
- * global proportion descriptor
- *
- * this is needed to consistently flip prop_global structures.
- */
-struct prop_descriptor {
- int index;
- struct prop_global pg[2];
- struct mutex mutex; /* serialize the prop_global switch */
-};
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
-void prop_change_shift(struct prop_descriptor *pd, int new_shift);
-
-/*
- * ----- PERCPU ------
- */
-
-struct prop_local_percpu {
- /*
- * the local events counter
- */
- struct percpu_counter events;
-
- /*
- * snapshot of the last seen global state
- */
- int shift;
- unsigned long period;
- raw_spinlock_t lock; /* protect the snapshot state */
-};
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
-void prop_local_destroy_percpu(struct prop_local_percpu *pl);
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
-void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
- long *numerator, long *denominator);
-
-static inline
-void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __prop_inc_percpu(pd, pl);
- local_irq_restore(flags);
-}
-
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter and fraction multiply.
- */
-#if BITS_PER_LONG == 32
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-#else
-#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
-#endif
-
-#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
-#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
-
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
- struct prop_local_percpu *pl, long frac);
-
-
-/*
- * ----- SINGLE ------
- */
-
-struct prop_local_single {
- /*
- * the local events counter
- */
- unsigned long events;
-
- /*
- * snapshot of the last seen global state
- * and a lock protecting this state
- */
- unsigned long period;
- int shift;
- raw_spinlock_t lock; /* protect the snapshot state */
-};
-
-#define INIT_PROP_LOCAL_SINGLE(name) \
-{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-}
-
-int prop_local_init_single(struct prop_local_single *pl);
-void prop_local_destroy_single(struct prop_local_single *pl);
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
-void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
- long *numerator, long *denominator);
-
-static inline
-void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __prop_inc_single(pd, pl);
- local_irq_restore(flags);
-}
-
-#endif /* _LINUX_PROPORTIONS_H */
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 393efe2edf9a..bdea1cb5e1db 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -21,8 +21,6 @@
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
bool psci_tos_resident_on(int cpu);
-bool psci_power_state_loses_context(u32 state);
-bool psci_power_state_is_valid(u32 state);
int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index cfc3ed46cad2..17018f3c066e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -5,59 +5,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
-struct pwm_device;
struct seq_file;
-
-#if IS_ENABLED(CONFIG_PWM)
-/*
- * pwm_request - request a PWM device
- */
-struct pwm_device *pwm_request(int pwm_id, const char *label);
-
-/*
- * pwm_free - free a PWM device
- */
-void pwm_free(struct pwm_device *pwm);
-
-/*
- * pwm_config - change a PWM device configuration
- */
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
-
-/*
- * pwm_enable - start a PWM output toggling
- */
-int pwm_enable(struct pwm_device *pwm);
-
-/*
- * pwm_disable - stop a PWM output toggling
- */
-void pwm_disable(struct pwm_device *pwm);
-#else
-static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void pwm_free(struct pwm_device *pwm)
-{
-}
-
-static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- return -EINVAL;
-}
-
-static inline int pwm_enable(struct pwm_device *pwm)
-{
- return -EINVAL;
-}
-
-static inline void pwm_disable(struct pwm_device *pwm)
-{
-}
-#endif
-
struct pwm_chip;
/**
@@ -74,10 +22,41 @@ enum pwm_polarity {
PWM_POLARITY_INVERSED,
};
+/**
+ * struct pwm_args - board-dependent PWM arguments
+ * @period: reference period
+ * @polarity: reference polarity
+ *
+ * This structure describes board-dependent arguments attached to a PWM
+ * device. These arguments are usually retrieved from the PWM lookup table or
+ * device tree.
+ *
+ * Do not confuse this with the PWM state: PWM arguments represent the initial
+ * configuration that users want to use on this PWM device rather than the
+ * current PWM hardware state.
+ */
+struct pwm_args {
+ unsigned int period;
+ enum pwm_polarity polarity;
+};
+
enum {
PWMF_REQUESTED = 1 << 0,
- PWMF_ENABLED = 1 << 1,
- PWMF_EXPORTED = 1 << 2,
+ PWMF_EXPORTED = 1 << 1,
+};
+
+/*
+ * struct pwm_state - state of a PWM channel
+ * @period: PWM period (in nanoseconds)
+ * @duty_cycle: PWM duty cycle (in nanoseconds)
+ * @polarity: PWM polarity
+ * @enabled: PWM enabled status
+ */
+struct pwm_state {
+ unsigned int period;
+ unsigned int duty_cycle;
+ enum pwm_polarity polarity;
+ bool enabled;
};
/**
@@ -88,10 +67,8 @@ enum {
* @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
- * @lock: used to serialize accesses to the PWM device where necessary
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
- * @polarity: polarity of the PWM signal
+ * @args: PWM arguments
+ * @state: current PWM channel state
*/
struct pwm_device {
const char *label;
@@ -100,48 +77,74 @@ struct pwm_device {
unsigned int pwm;
struct pwm_chip *chip;
void *chip_data;
- struct mutex lock;
- unsigned int period;
- unsigned int duty_cycle;
- enum pwm_polarity polarity;
+ struct pwm_args args;
+ struct pwm_state state;
};
+/**
+ * pwm_get_state() - retrieve the current PWM state
+ * @pwm: PWM device
+ * @state: state to fill with the current PWM state
+ */
+static inline void pwm_get_state(const struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ *state = pwm->state;
+}
+
static inline bool pwm_is_enabled(const struct pwm_device *pwm)
{
- return test_bit(PWMF_ENABLED, &pwm->flags);
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
+
+ return state.enabled;
}
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
- pwm->period = period;
+ pwm->state.period = period;
}
static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
- return pwm ? pwm->period : 0;
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
+
+ return state.period;
}
static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
{
if (pwm)
- pwm->duty_cycle = duty;
+ pwm->state.duty_cycle = duty;
}
static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
- return pwm ? pwm->duty_cycle : 0;
-}
+ struct pwm_state state;
-/*
- * pwm_set_polarity - configure the polarity of a PWM signal
- */
-int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+ pwm_get_state(pwm, &state);
+
+ return state.duty_cycle;
+}
static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
{
- return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
+
+ return state.polarity;
+}
+
+static inline void pwm_get_args(const struct pwm_device *pwm,
+ struct pwm_args *args)
+{
+ *args = pwm->args;
}
/**
@@ -152,6 +155,13 @@ static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
* @set_polarity: configure the polarity of this PWM
* @enable: enable PWM output toggling
* @disable: disable PWM output toggling
+ * @apply: atomically apply a new PWM config. The state argument
+ * should be adjusted to the real hardware config (if the driver
+ * has to approximate the period or duty_cycle value, state
+ * should reflect it)
+ * @get_state: get the current PWM state. This function is only
+ * called once per PWM device when the PWM chip is
+ * registered.
* @dbg_show: optional routine to show contents in debugfs
* @owner: helps prevent removal of modules exporting active PWMs
*/
@@ -164,6 +174,10 @@ struct pwm_ops {
enum pwm_polarity polarity);
int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
+ void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
#ifdef CONFIG_DEBUG_FS
void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
@@ -199,6 +213,115 @@ struct pwm_chip {
};
#if IS_ENABLED(CONFIG_PWM)
+/* PWM user APIs */
+struct pwm_device *pwm_request(int pwm_id, const char *label);
+void pwm_free(struct pwm_device *pwm);
+int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+int pwm_adjust_config(struct pwm_device *pwm);
+
+/**
+ * pwm_config() - change a PWM device configuration
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
+ int period_ns)
+{
+ struct pwm_state state;
+
+ if (!pwm)
+ return -EINVAL;
+
+ pwm_get_state(pwm, &state);
+ if (state.duty_cycle == duty_ns && state.period == period_ns)
+ return 0;
+
+ state.duty_cycle = duty_ns;
+ state.period = period_ns;
+ return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_set_polarity() - configure the polarity of a PWM signal
+ * @pwm: PWM device
+ * @polarity: new polarity of the PWM signal
+ *
+ * Note that the polarity cannot be configured while the PWM device is
+ * enabled.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_set_polarity(struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct pwm_state state;
+
+ if (!pwm)
+ return -EINVAL;
+
+ pwm_get_state(pwm, &state);
+ if (state.polarity == polarity)
+ return 0;
+
+ /*
+ * Changing the polarity of a running PWM without adjusting the
+ * duty_cycle/period value is a bit risky (it can introduce glitches).
+ * Return -EBUSY in this case.
+ * Note that this is allowed when using pwm_apply_state() because
+ * the user specifies all the parameters.
+ */
+ if (state.enabled)
+ return -EBUSY;
+
+ state.polarity = polarity;
+ return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_enable() - start a PWM output toggling
+ * @pwm: PWM device
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+ struct pwm_state state;
+
+ if (!pwm)
+ return -EINVAL;
+
+ pwm_get_state(pwm, &state);
+ if (state.enabled)
+ return 0;
+
+ state.enabled = true;
+ return pwm_apply_state(pwm, &state);
+}
+
+/**
+ * pwm_disable() - stop a PWM output toggling
+ * @pwm: PWM device
+ */
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+ struct pwm_state state;
+
+ if (!pwm)
+ return;
+
+ pwm_get_state(pwm, &state);
+ if (!state.enabled)
+ return;
+
+ state.enabled = false;
+ pwm_apply_state(pwm, &state);
+}
+
+
+/* PWM provider APIs */
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
@@ -224,6 +347,47 @@ void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
bool pwm_can_sleep(struct pwm_device *pwm);
#else
+static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_free(struct pwm_device *pwm)
+{
+}
+
+static inline int pwm_apply_state(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pwm_adjust_config(struct pwm_device *pwm)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
+ int period_ns)
+{
+ return -EINVAL;
+}
+
+static inline int pwm_set_polarity(struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+ return -EINVAL;
+}
+
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+}
+
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
return -EINVAL;
@@ -295,6 +459,34 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
}
#endif
+static inline void pwm_apply_args(struct pwm_device *pwm)
+{
+ /*
+ * PWM users calling pwm_apply_args() expect to have a fresh config
+ * where the polarity and period are set according to pwm_args info.
+ * The problem is, polarity can only be changed when the PWM is
+ * disabled.
+ *
+ * PWM drivers supporting hardware readout may declare the PWM device
+ * as enabled and prevent polarity setting. This differs from the
+ * existing behavior, where all PWM devices are declared as disabled
+ * at startup (even if they are actually enabled), thus allowing
+ * polarity setting.
+ *
+ * Instead of setting ->enabled to false, we call pwm_disable()
+ * before pwm_set_polarity() to ensure that everything is configured
+ * as expected, and the PWM is really disabled when the user requests
+ * it.
+ *
+ * Note that PWM users requiring a smooth handover between the
+ * bootloader and the kernel (like critical regulators controlled by
+ * PWM devices) will have to switch to the atomic API and avoid calling
+ * pwm_apply_args().
+ */
+ pwm_disable(pwm);
+ pwm_set_polarity(pwm, pwm->args.polarity);
+}
+
struct pwm_lookup {
struct list_head list;
const char *provider;
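
A minimal sketch (hypothetical consumer code with arbitrary period/duty values) of the atomic API introduced above: the whole configuration is read with pwm_get_state(), adjusted, and handed to the driver in a single pwm_apply_state() call instead of separate pwm_config()/pwm_set_polarity()/pwm_enable() steps:

	struct pwm_state state;
	int ret;

	pwm_get_state(pwm, &state);		/* start from the current channel state */
	state.period = 1000000;			/* 1 ms */
	state.duty_cycle = 250000;		/* 25% */
	state.polarity = PWM_POLARITY_NORMAL;
	state.enabled = true;
	ret = pwm_apply_state(pwm, &state);	/* drivers with ->apply() commit this atomically */
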
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 53ecb37ae563..3f14c7efe68f 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -285,6 +285,63 @@
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
+ PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ + 4)
+#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
@@ -327,9 +384,14 @@ struct regpair {
__le32 hi;
};
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
struct async_data async_info;
};
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index e1d69834a11f..6ae8cb4a61d3 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -13,6 +13,7 @@
#include <linux/if_link.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_iov_if.h>
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -27,12 +28,15 @@ struct qed_dev_eth_info {
struct qed_update_vport_rss_params {
u16 rss_ind_table[128];
u32 rss_key[10];
+ u8 rss_caps;
};
struct qed_update_vport_params {
u8 vport_id;
u8 update_vport_active_flg;
u8 vport_active_flg;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 update_rss_flg;
@@ -111,12 +115,23 @@ struct qed_queue_start_common_params {
u16 sb_idx;
};
+struct qed_tunn_params {
+ u16 vxlan_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u8 update_geneve_port;
+};
+
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
+ void (*force_mac) (void *dev, u8 *mac);
};
struct qed_eth_ops {
const struct qed_common_ops *common;
+#ifdef CONFIG_QED_SRIOV
+ const struct qed_iov_hv_ops *iov;
+#endif
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
@@ -125,6 +140,8 @@ struct qed_eth_ops {
struct qed_eth_cb_ops *ops,
void *cookie);
+ bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
+
int (*vport_start)(struct qed_dev *cdev,
struct qed_start_vport_params *params);
@@ -165,9 +182,12 @@ struct qed_eth_ops {
void (*get_vport_stats)(struct qed_dev *cdev,
struct qed_eth_stats *stats);
+
+ int (*tunn_config)(struct qed_dev *cdev,
+ struct qed_tunn_params *params);
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+const struct qed_eth_ops *qed_get_eth_ops(void);
void qed_put_eth_ops(void);
#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 1f7599c77cd4..4c29439f54bf 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,15 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ISCSI,
+ DCBX_PROTOCOL_FCOE,
+ DCBX_PROTOCOL_ROCE,
+ DCBX_PROTOCOL_ROCE_V2,
+ DCBX_PROTOCOL_ETH,
+ DCBX_MAX_PROTOCOL_TYPE
+};
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -93,6 +102,7 @@ struct qed_dev_info {
u32 flash_size;
u8 mf_mode;
+ bool tx_switching;
};
enum qed_sb_type {
@@ -110,6 +120,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
@@ -118,6 +129,12 @@ struct qed_link_params {
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
u32 pause_config;
+#define QED_LINK_LOOPBACK_NONE BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
+#define QED_LINK_LOOPBACK_EXT BIT(3)
+#define QED_LINK_LOOPBACK_MAC BIT(4)
+ u32 loopback_mode;
};
struct qed_link_output {
@@ -133,6 +150,13 @@ struct qed_link_output {
u32 pause_config;
};
+struct qed_probe_params {
+ enum qed_protocol protocol;
+ u32 dp_module;
+ u8 dp_level;
+ bool is_vf;
+};
+
#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
u32 int_mode;
@@ -158,10 +182,49 @@ struct qed_common_cb_ops {
struct qed_link_output *link);
};
+struct qed_selftest_ops {
+/**
+ * @brief selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_interrupt)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_memory)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_register)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_clock)(struct qed_dev *cdev);
+};
+
struct qed_common_ops {
+ struct qed_selftest_ops *selftest;
+
struct qed_dev* (*probe)(struct pci_dev *dev,
- enum qed_protocol protocol,
- u32 dp_module, u8 dp_level);
+ struct qed_probe_params *params);
void (*remove)(struct qed_dev *cdev);
@@ -211,6 +274,16 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+
+/**
+ * @brief can_link_change - can the instance change the link or not
+ *
+ * @param cdev
+ *
+ * @return true if link-change is allowed, false otherwise.
+ */
+ bool (*can_link_change)(struct qed_dev *cdev);
+
/**
* @brief set_link - set links according to params
*
@@ -271,15 +344,6 @@ struct qed_common_ops {
enum qed_led_mode mode);
};
-/**
- * @brief qed_get_protocol_version
- *
- * @param protocol
- *
- * @return version supported by qed for given protocol driver
- */
-u32 qed_get_protocol_version(enum qed_protocol protocol);
-
#define MASK_FIELD(_name, _value) \
((_value) &= (_name ## _MASK))
@@ -393,16 +457,16 @@ struct qed_eth_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -524,4 +588,15 @@ static inline void internal_ram_wr(void __iomem *addr,
__internal_ram_wr(NULL, addr, size, data);
}
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
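
A hedged sketch (hypothetical PCI driver code; QED_PROTOCOL_ETH is assumed to be the relevant enum qed_protocol value, and pdev is the caller's struct pci_dev) of the reworked probe entry point shown above, where the protocol and debug parameters are now carried in struct qed_probe_params:

	const struct qed_eth_ops *ops = qed_get_eth_ops();
	struct qed_probe_params params = {
		.protocol = QED_PROTOCOL_ETH,
		.dp_module = 0,
		.dp_level = 0,
		.is_vf = false,		/* set to true for a VF probe */
	};
	struct qed_dev *cdev;

	cdev = ops->common->probe(pdev, &params);
	if (!cdev)
		return -ENODEV;
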
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
new file mode 100644
index 000000000000..5a4f8d0899e9
--- /dev/null
+++ b/include/linux/qed/qed_iov_if.h
@@ -0,0 +1,34 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IOV_IF_H
+#define _QED_IOV_IF_H
+
+#include <linux/qed/qed_if.h>
+
+/* Structs used by PF to control and manipulate child VFs */
+struct qed_iov_hv_ops {
+ int (*configure)(struct qed_dev *cdev, int num_vfs_param);
+
+ int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
+
+ int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
+
+ int (*get_config) (struct qed_dev *cdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+ int (*set_link_state) (struct qed_dev *cdev, int vf_id,
+ int link_state);
+
+ int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
+
+ int (*set_rate) (struct qed_dev *cdev, int vfid,
+ u32 min_rate, u32 max_rate);
+};
+
+#endif
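
A hedged sketch (hypothetical qede-style netdev code; struct foo_dev holding the cdev and ops pointers is an assumption of this example) of how a PF driver is expected to wire these callbacks into the standard ndo_set_vf_* operations; note that the iov member only exists when CONFIG_QED_SRIOV is set:

	static int foo_set_vf_mac(struct net_device *ndev, int vfid, u8 *mac)
	{
		struct foo_dev *edev = netdev_priv(ndev);

		if (!edev->ops || !edev->ops->iov)
			return -EINVAL;

		return edev->ops->iov->set_mac(edev->cdev, mac, vfid);
	}
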
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 51a97ac8bfbf..cb4b7e8cee81 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -29,51 +29,45 @@
#include <linux/rcupdate.h>
/*
- * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
- * than a data item) is signalled by the low bit set in the root->rnode
- * pointer.
- *
- * In this case root->height is > 0, but the indirect pointer tests are
- * needed for RCU lookups (because root->height is unreliable). The only
- * time callers need worry about this is when doing a lookup_slot under
- * RCU.
- *
- * Indirect pointer in fact is also used to tag the last pointer of a node
- * when it is shrunk, before we rcu free the node. See shrink code for
- * details.
+ * The bottom two bits of the slot determine how the remaining bits in the
+ * slot are interpreted:
+ *
+ * 00 - data pointer
+ * 01 - internal entry
+ * 10 - exceptional entry
+ * 11 - locked exceptional entry
+ *
+ * The internal entry may be a pointer to the next level in the tree, a
+ * sibling entry, or an indicator that the entry in this slot has been moved
+ * to another location in the tree and the lookup should be restarted. While
+ * NULL fits the 'data pointer' pattern, it means that there is no entry in
+ * the tree for this index (no matter what level of the tree it is found at).
+ * This means that you cannot store NULL in the tree as a value for the index.
*/
-#define RADIX_TREE_INDIRECT_PTR 1
+#define RADIX_TREE_ENTRY_MASK 3UL
+#define RADIX_TREE_INTERNAL_NODE 1UL
+
/*
- * A common use of the radix tree is to store pointers to struct pages;
- * but shmem/tmpfs needs also to store swap entries in the same tree:
- * those are marked as exceptional entries to distinguish them.
+ * Most users of the radix tree store pointers but shmem/tmpfs stores swap
+ * entries in the same tree. They are marked as exceptional entries to
+ * distinguish them from pointers to struct page.
* EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
*/
#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
-#define RADIX_DAX_MASK 0xf
-#define RADIX_DAX_SHIFT 4
-#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
-#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
-#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
- RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
-
-static inline int radix_tree_is_indirect_ptr(void *ptr)
+static inline bool radix_tree_is_internal_node(void *ptr)
{
- return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
+ return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
+ RADIX_TREE_INTERNAL_NODE;
}
/*** radix-tree API starts here ***/
#define RADIX_TREE_MAX_TAGS 3
-#ifdef __KERNEL__
+#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
-#else
-#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
@@ -86,16 +80,13 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
-/* Height component in node->path */
-#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1)
-#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1)
-
/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
struct radix_tree_node {
- unsigned int path; /* Offset in parent & height from the bottom */
+ unsigned char shift; /* Bits remaining in each slot */
+ unsigned char offset; /* Slot offset in parent */
unsigned int count;
union {
struct {
@@ -115,13 +106,11 @@ struct radix_tree_node {
/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
- unsigned int height;
gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode;
};
#define RADIX_TREE_INIT(mask) { \
- .height = 0, \
.gfp_mask = (mask), \
.rnode = NULL, \
}
@@ -131,11 +120,15 @@ struct radix_tree_root {
#define INIT_RADIX_TREE(root, mask) \
do { \
- (root)->height = 0; \
(root)->gfp_mask = (mask); \
(root)->rnode = NULL; \
} while (0)
+static inline bool radix_tree_empty(struct radix_tree_root *root)
+{
+ return root->rnode == NULL;
+}
+
/**
* Radix-tree synchronization
*
@@ -231,7 +224,7 @@ static inline void *radix_tree_deref_slot_protected(void **pslot,
*/
static inline int radix_tree_deref_retry(void *arg)
{
- return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+ return unlikely(radix_tree_is_internal_node(arg));
}
/**
@@ -252,8 +245,7 @@ static inline int radix_tree_exceptional_entry(void *arg)
*/
static inline int radix_tree_exception(void *arg)
{
- return unlikely((unsigned long)arg &
- (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
+ return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
/**
@@ -266,7 +258,7 @@ static inline int radix_tree_exception(void *arg)
*/
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
- BUG_ON(radix_tree_is_indirect_ptr(item));
+ BUG_ON(radix_tree_is_internal_node(item));
rcu_assign_pointer(*pslot, item);
}
@@ -288,9 +280,12 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-unsigned int
-radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
- unsigned long first_index, unsigned int max_items);
+struct radix_tree_node *radix_tree_replace_clear_tags(
+ struct radix_tree_root *root,
+ unsigned long index, void *entry);
+unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+ void **results, unsigned long first_index,
+ unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
@@ -327,8 +322,9 @@ static inline void radix_tree_preload_end(void)
* struct radix_tree_iter - radix tree iterator state
*
* @index: index of current slot
- * @next_index: next-to-last index for this chunk
+ * @next_index: one beyond the last index for this chunk
* @tags: bit-mask for tag-iterating
+ * @shift: shift for the node that holds our slots
*
* This radix tree iterator works in terms of "chunks" of slots. A chunk is a
* subinterval of slots contained within one radix tree leaf node. It is
@@ -341,8 +337,20 @@ struct radix_tree_iter {
unsigned long index;
unsigned long next_index;
unsigned long tags;
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+ unsigned int shift;
+#endif
};
+static inline unsigned int iter_shift(struct radix_tree_iter *iter)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+ return iter->shift;
+#else
+ return 0;
+#endif
+}
+
#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */
@@ -402,6 +410,12 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
return NULL;
}
+static inline unsigned long
+__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
+{
+ return iter->index + (slots << iter_shift(iter));
+}
+
/**
* radix_tree_iter_next - resume iterating when the chunk may be invalid
* @iter: iterator state
@@ -413,7 +427,7 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
- iter->next_index = iter->index + 1;
+ iter->next_index = __radix_tree_iter_add(iter, 1);
iter->tags = 0;
return NULL;
}
@@ -427,7 +441,12 @@ void **radix_tree_iter_next(struct radix_tree_iter *iter)
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
- return iter->next_index - iter->index;
+ return (iter->next_index - iter->index) >> iter_shift(iter);
+}
+
+static inline struct radix_tree_node *entry_to_node(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}
/**
@@ -445,24 +464,49 @@ static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
if (flags & RADIX_TREE_ITER_TAGGED) {
+ void *canon = slot;
+
iter->tags >>= 1;
+ if (unlikely(!iter->tags))
+ return NULL;
+ while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
+ radix_tree_is_internal_node(slot[1])) {
+ if (entry_to_node(slot[1]) == canon) {
+ iter->tags >>= 1;
+ iter->index = __radix_tree_iter_add(iter, 1);
+ slot++;
+ continue;
+ }
+ iter->next_index = __radix_tree_iter_add(iter, 1);
+ return NULL;
+ }
if (likely(iter->tags & 1ul)) {
- iter->index++;
+ iter->index = __radix_tree_iter_add(iter, 1);
return slot + 1;
}
- if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
+ if (!(flags & RADIX_TREE_ITER_CONTIG)) {
unsigned offset = __ffs(iter->tags);
iter->tags >>= offset;
- iter->index += offset + 1;
+ iter->index = __radix_tree_iter_add(iter, offset + 1);
return slot + offset + 1;
}
} else {
- long size = radix_tree_chunk_size(iter);
+ long count = radix_tree_chunk_size(iter);
+ void *canon = slot;
- while (--size > 0) {
+ while (--count > 0) {
slot++;
- iter->index++;
+ iter->index = __radix_tree_iter_add(iter, 1);
+
+ if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
+ radix_tree_is_internal_node(*slot)) {
+ if (entry_to_node(*slot) == canon)
+ continue;
+ iter->next_index = iter->index;
+ break;
+ }
+
if (likely(*slot))
return slot;
if (flags & RADIX_TREE_ITER_CONTIG) {
@@ -476,34 +520,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
}
/**
- * radix_tree_for_each_chunk - iterate over chunks
- *
- * @slot: the void** variable for pointer to chunk first slot
- * @root: the struct radix_tree_root pointer
- * @iter: the struct radix_tree_iter pointer
- * @start: iteration starting index
- * @flags: RADIX_TREE_ITER_* and tag index
- *
- * Locks can be released and reacquired between iterations.
- */
-#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \
- for (slot = radix_tree_iter_init(iter, start) ; \
- (slot = radix_tree_next_chunk(root, iter, flags)) ;)
-
-/**
- * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
- *
- * @slot: the void** variable, at the beginning points to chunk first slot
- * @iter: the struct radix_tree_iter pointer
- * @flags: RADIX_TREE_ITER_*, should be constant
- *
- * This macro is designed to be nested inside radix_tree_for_each_chunk().
- * @slot points to the radix tree slot, @iter->index contains its index.
- */
-#define radix_tree_for_each_chunk_slot(slot, iter, flags) \
- for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
-
-/**
* radix_tree_for_each_slot - iterate over non-empty slots
*
* @slot: the void** variable for pointer to slot
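
A brief usage sketch (hypothetical caller; "tree" is an assumed struct radix_tree_root, and the usual rcu_read_lock() or tree-lock protection is taken as given) of the slot iteration served by the reworked radix_tree_next_slot() above; multiorder entries are handled transparently:

	void **slot;
	struct radix_tree_iter iter;

	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
		struct page *page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {	/* raced with a tree shrink/grow */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* use page and iter.index here */
	}
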
diff --git a/include/linux/random.h b/include/linux/random.h
index 9c29122037f9..e47e533742b5 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -26,7 +26,6 @@ extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
-void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
#ifndef MODULE
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 17d4f849c65e..8beb98dcf14f 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -488,6 +488,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
}
/**
+ * hlist_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_tail_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *i, *last = NULL;
+
+ for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_add_head_rcu(n, h);
+ }
+}
+
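A short sketch (hypothetical hash-list code; foo_lock, foo_list and the obj layout are assumptions) of the usual pairing for the new hlist_add_tail_rcu(): writers serialize on a lock, readers only need rcu_read_lock() plus the _rcu traversal primitives:

	/* writer side */
	spin_lock(&foo_lock);
	hlist_add_tail_rcu(&obj->node, &foo_list);
	spin_unlock(&foo_lock);

	/* reader side */
	rcu_read_lock();
	hlist_for_each_entry_rcu(obj, &foo_list, node) {
		/* obj is only freed after a grace period following hlist_del_rcu() */
	}
	rcu_read_unlock();
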
+/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2657aff2725b..5f1533e3d032 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -508,14 +508,7 @@ int rcu_read_lock_bh_held(void);
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
* critical section unless it can prove otherwise.
*/
-#ifdef CONFIG_PREEMPT_COUNT
int rcu_read_lock_sched_held(void);
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
- return 1;
-}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -532,18 +525,10 @@ static inline int rcu_read_lock_bh_held(void)
return 1;
}
-#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
- return preempt_count() != 0 || irqs_disabled();
-}
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
- return 1;
+ return !preemptible();
}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
-
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
@@ -1144,4 +1129,17 @@ static inline void rcu_sysidle_force_exit(void)
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+/*
+ * Dump the ftrace buffer, but only one time per callsite per boot.
+ */
+#define rcu_ftrace_dump(oops_dump_mode) \
+do { \
+ static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
+ \
+ if (!atomic_read(&___rfd_beenhere) && \
+ !atomic_xchg(&___rfd_beenhere, 1)) \
+ ftrace_dump(oops_dump_mode); \
+} while (0)
+
+
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 64809aea661c..93aea75029fb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -149,6 +149,22 @@ static inline unsigned long rcu_batches_completed_sched(void)
return 0;
}
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+ return 0;
+}
+
static inline void rcu_force_quiescent_state(void)
{
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index ad1eda9fa4da..5043cb823fb2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -87,6 +87,8 @@ unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 2eb386017fa5..113d861a1e4c 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -69,11 +69,13 @@ enum {
* @id: regulator id
* @name: regulator name
* @init_data: regulator init data
+ * @of_node: device tree node (optional)
*/
struct act8865_regulator_data {
int id;
const char *name;
struct regulator_init_data *init_data;
+ struct device_node *of_node;
};
/**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index cd271e89a7e6..fcfa40a6692c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,9 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
+ * @set_over_current_protection: Enable support for automatically shutting
+ * down when an over-current event is detected.
+ *
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
* @set_mode: Set the configured operating mode for the regulator.
@@ -255,6 +258,8 @@ enum regulator_type {
*
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
+ * @csel_reg: Register for TPS65218 LS3 current regulator
+ * @csel_mask: Mask for TPS65218 LS3 current regulator
* @apply_reg: Register for initiate voltage change on the output when
* using regulator_set_voltage_sel_regmap
* @apply_bit: Register bitfield used for initiate voltage change on the
@@ -292,7 +297,7 @@ struct regulator_desc {
const struct regulator_desc *,
struct regulator_config *);
int id;
- bool continuous_voltage_range;
+ unsigned int continuous_voltage_range:1;
unsigned n_voltages;
const struct regulator_ops *ops;
int irq;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 5d627c83a630..ad3e5158e586 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -97,6 +97,7 @@ struct regulator_state {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @over_current_protection: Auto disable on over current event.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f6a8a16a0d4d..2fcb9980262a 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -54,6 +54,10 @@
* @reg_init_data: The regulator init data.
* @control_flags: Control flags which are ORed value of above flags to
* configure device.
+ * @junction_temp_warning: Junction temperature in millicelsius at which a
+ * warning needs to be set. Thermal functionality is
+ * only supported on MAX77621. The warning thresholds
+ * supported by MAX77621 are 120C and 140C.
* @enable_ext_control: Enable the voltage enable/disable through external
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
@@ -67,6 +71,7 @@
struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
+ unsigned long junction_temp_warning;
bool enable_ext_control;
int enable_gpio;
int dvs_gpio;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c4e1384f636..1c457a8dd5a6 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -365,6 +365,8 @@ enum rproc_state {
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
+ * @RPROC_WATCHDOG: watchdog bite
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -373,6 +375,8 @@ enum rproc_state {
*/
enum rproc_crash_type {
RPROC_MMUFAULT,
+ RPROC_WATCHDOG,
+ RPROC_FATAL_ERROR,
};
/**
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 5a0b64cf68b4..b0f305e77b7f 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
+/**
+ * struct reservation_object_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: for growing shared fence table
+ * @shared: shared fence table
+ */
struct reservation_object_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
struct fence __rcu *shared[];
};
+/**
+ * struct reservation_object - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @seq: sequence count for managing RCU read-side synchronization
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ * @staged: staged copy of shared fences for RCU updates
+ */
struct reservation_object {
struct ww_mutex lock;
seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
#define reservation_object_assert_held(obj) \
lockdep_assert_held(&(obj)->lock.base)
+/**
+ * reservation_object_init - initialize a reservation object
+ * @obj: the reservation object
+ */
static inline void
reservation_object_init(struct reservation_object *obj)
{
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
obj->staged = NULL;
}
+/**
+ * reservation_object_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
static inline void
reservation_object_fini(struct reservation_object *obj)
{
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
ww_mutex_destroy(&obj->lock);
}
+/**
+ * reservation_object_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. Does NOT take references to
+ * the fences. The obj->lock must be held.
+ */
static inline struct reservation_object_list *
reservation_object_get_list(struct reservation_object *obj)
{
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
reservation_object_held(obj));
}
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
static inline struct fence *
reservation_object_get_excl(struct reservation_object *obj)
{
@@ -120,6 +162,35 @@ reservation_object_get_excl(struct reservation_object *obj)
reservation_object_held(obj));
}
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct fence *
+reservation_object_get_excl_rcu(struct reservation_object *obj)
+{
+ struct fence *fence;
+ unsigned seq;
+retry:
+ seq = read_seqcount_begin(&obj->seq);
+ rcu_read_lock();
+ fence = rcu_dereference(obj->fence_excl);
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ rcu_read_unlock();
+ goto retry;
+ }
+ fence = fence_get(fence);
+ rcu_read_unlock();
+ return fence;
+}
+
int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence);
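
A hedged usage sketch (hypothetical driver code, assuming the fence API from <linux/fence.h>) of the new lockless helper documented above: take a reference to the exclusive fence without obj->lock, wait on it, then drop the reference:

	static int foo_wait_exclusive(struct reservation_object *resv)
	{
		struct fence *excl;
		long ret = 0;

		excl = reservation_object_get_excl_rcu(resv);
		if (excl) {
			ret = fence_wait(excl, true);	/* interruptible; negative if interrupted */
			fence_put(excl);
		}

		return ret < 0 ? ret : 0;
	}
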
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index a3a5bcdb1d02..b91ba932bbd4 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -31,6 +31,7 @@ struct of_phandle_args;
* @ops: a pointer to device specific struct reset_control_ops
* @owner: kernel module of the reset controller driver
* @list: internal list of reset controller devices
+ * @reset_control_head: head of internal list of requested reset controls
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
@@ -41,6 +42,7 @@ struct reset_controller_dev {
const struct reset_control_ops *ops;
struct module *owner;
struct list_head list;
+ struct list_head reset_control_head;
struct device_node *of_node;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
diff --git a/include/linux/reset.h b/include/linux/reset.h
index c4c097de0ba9..ec0306ce7b92 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -1,8 +1,8 @@
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
-struct device;
-struct device_node;
+#include <linux/device.h>
+
struct reset_control;
#ifdef CONFIG_RESET_CONTROLLER
@@ -12,9 +12,11 @@ int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
-struct reset_control *reset_control_get(struct device *dev, const char *id);
+struct reset_control *__of_reset_control_get(struct device_node *node,
+ const char *id, int index, int shared);
void reset_control_put(struct reset_control *rstc);
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+struct reset_control *__devm_reset_control_get(struct device *dev,
+ const char *id, int index, int shared);
int __must_check device_reset(struct device *dev);
@@ -23,24 +25,6 @@ static inline int device_reset_optional(struct device *dev)
return device_reset(dev);
}
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return reset_control_get(dev, id);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return devm_reset_control_get(dev, id);
-}
-
-struct reset_control *of_reset_control_get(struct device_node *node,
- const char *id);
-
-struct reset_control *of_reset_control_get_by_index(
- struct device_node *node, int index);
-
#else
static inline int reset_control_reset(struct reset_control *rstc)
@@ -72,49 +56,191 @@ static inline void reset_control_put(struct reset_control *rstc)
WARN_ON(1);
}
+static inline int __must_check device_reset(struct device *dev)
+{
+ WARN_ON(1);
+ return -ENOTSUPP;
+}
+
static inline int device_reset_optional(struct device *dev)
{
return -ENOTSUPP;
}
-static inline struct reset_control *__must_check reset_control_get(
- struct device *dev, const char *id)
+static inline struct reset_control *__of_reset_control_get(
+ struct device_node *node,
+ const char *id, int index, int shared)
{
- WARN_ON(1);
return ERR_PTR(-EINVAL);
}
-static inline struct reset_control *__must_check devm_reset_control_get(
+static inline struct reset_control *__devm_reset_control_get(
+ struct device *dev,
+ const char *id, int index, int shared)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_RESET_CONTROLLER */
+
+/**
+ * reset_control_get - Lookup and obtain an exclusive reference to a
+ * reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * If this function is called more than once for the same reset_control it will
+ * return -EBUSY.
+ *
+ * See reset_control_get_shared for details on shared references to
+ * reset-controls.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *__must_check reset_control_get(
struct device *dev, const char *id)
{
+#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
- return ERR_PTR(-EINVAL);
+#endif
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
-static inline struct reset_control *devm_reset_control_get_optional(
+/**
+ * reset_control_get_shared - Lookup and obtain a shared reference to a
+ * reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * This function is intended for use with reset-controls which are shared
+ * between hardware-blocks.
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed: the reset-core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
}
+/**
+ * of_reset_control_get - Lookup and obtain an exclusive reference to a
+ * reset controller.
+ * @node: device node of the device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
static inline struct reset_control *of_reset_control_get(
struct device_node *node, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(node, id, 0, 0);
}
+/**
+ * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
+ * a reset controller by index.
+ * @node: device node of the device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * This is used to walk the list of resets for a device or power domain,
+ * in whatever order. Returns a struct reset_control or IS_ERR() condition
+ * containing errno.
+ */
static inline struct reset_control *of_reset_control_get_by_index(
- struct device_node *node, int index)
+ struct device_node *node, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(node, NULL, index, 0);
}
-#endif /* CONFIG_RESET_CONTROLLER */
+/**
+ * devm_reset_control_get - resource managed reset_control_get()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+#ifndef CONFIG_RESET_CONTROLLER
+ WARN_ON(1);
+#endif
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+/**
+ * devm_reset_control_get_by_index - resource managed reset_control_get
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_by_index(
+ struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
+/**
+ * devm_reset_control_get_shared - resource managed reset_control_get_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_shared_by_index - resource managed
+ * reset_control_get_shared
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared_by_index(
+ struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 1);
+}
#endif
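
A minimal sketch of a consumer of the reworked reset API (hypothetical driver; the function and line names are invented, only the calls come from the header above):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;
	int ret;

	/* Exclusive reference: only this driver may toggle the line. */
	rstc = devm_reset_control_get(&pdev->dev, "core");
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_deassert(rstc);
	if (ret)
		return ret;

	/*
	 * A block sharing its reset line would instead call
	 * devm_reset_control_get_shared(&pdev->dev, "core"); the core then
	 * re-asserts only after every user has called reset_control_assert().
	 */
	return 0;
}
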
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 63bd7601b6de..3eef0802a0cd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -346,7 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
+ gfp_t gfp);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
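
Since the walker now allocates with a caller-supplied gfp mask, a typical iteration under GFP_KERNEL could look like the hedged sketch below (my_ht and foo_dump are placeholders):

#include <linux/err.h>
#include <linux/rhashtable.h>

static void foo_dump(struct rhashtable *my_ht)
{
	struct rhashtable_iter iter;
	void *obj;
	int err;

	err = rhashtable_walk_init(my_ht, &iter, GFP_KERNEL);
	if (err)
		return;

	err = rhashtable_walk_start(&iter);
	if (err && err != -EAGAIN)
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized, keep walking */
			break;
		}
		/* ... inspect obj ... */
	}

	rhashtable_walk_stop(&iter);
out:
	rhashtable_walk_exit(&iter);
}
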
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 82a673905edb..ada50ff36da0 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -169,7 +169,7 @@ struct rpmsg_driver {
int register_rpmsg_device(struct rpmsg_channel *dev);
void unregister_rpmsg_device(struct rpmsg_channel *dev);
-int register_rpmsg_driver(struct rpmsg_driver *drv);
+int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
@@ -177,6 +177,22 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
int
rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_rpmsg_driver(drv) \
+ __register_rpmsg_driver(drv, THIS_MODULE)
+
+/**
+ * module_rpmsg_driver() - Helper macro for registering an rpmsg driver
+ * @__rpmsg_driver: rpmsg_driver struct
+ *
+ * Helper macro for rpmsg drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_rpmsg_driver(__rpmsg_driver) \
+ module_driver(__rpmsg_driver, register_rpmsg_driver, \
+ unregister_rpmsg_driver)
+
/**
* rpmsg_send() - send a message across to the remote processor
* @rpdev: the rpmsg channel
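
The new module_rpmsg_driver() helper lets a driver that only registers and unregisters itself drop its init/exit boilerplate; a hedged, hypothetical user of the rpmsg_channel-era API shown above (all names invented):

#include <linux/module.h>
#include <linux/rpmsg.h>

static const struct rpmsg_device_id foo_rpmsg_id_table[] = {
	{ .name = "foo-channel" },
	{ },
};

static int foo_rpmsg_probe(struct rpmsg_channel *rpdev)
{
	return 0;
}

static void foo_rpmsg_remove(struct rpmsg_channel *rpdev)
{
}

static void foo_rpmsg_cb(struct rpmsg_channel *rpdev, void *data, int len,
			 void *priv, u32 src)
{
	/* consume the inbound message */
}

static struct rpmsg_driver foo_rpmsg_driver = {
	.drv.name	= KBUILD_MODNAME,
	.id_table	= foo_rpmsg_id_table,
	.probe		= foo_rpmsg_probe,
	.remove		= foo_rpmsg_remove,
	.callback	= foo_rpmsg_cb,
};

/* replaces the old open-coded module_init()/module_exit() pair */
module_rpmsg_driver(foo_rpmsg_driver);
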
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 561e8615528d..ae0528b834cd 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -34,7 +34,7 @@ struct rw_semaphore {
extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8f498cdde280..d37fbb34d06f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/err.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
@@ -43,6 +44,7 @@ struct rw_semaphore {
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
@@ -116,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
* lock for writing
*/
extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
@@ -153,6 +156,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
# define down_write_nest_lock(sem, nest_lock) \
@@ -173,6 +177,7 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
# define down_read_nested(sem, subclass) down_read(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
+# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
# define down_read_non_owner(sem) down_read(sem)
# define up_read_non_owner(sem) up_read(sem)
#endif
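
Unlike down_write(), the killable variant can fail, so callers must propagate the error instead of assuming the lock was taken; a small hedged sketch (foo_update is hypothetical):

#include <linux/rwsem.h>

static int foo_update(struct rw_semaphore *sem)
{
	if (down_write_killable(sem))
		return -EINTR;	/* a fatal signal arrived while sleeping */

	/* ... modify the protected data ... */

	up_write(sem);
	return 0;
}
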
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 556ec1ea2574..cb3c8fe6acd7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
/*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation. We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32
+ */
+#define SG_CHUNK_SIZE 128
+
+/*
+ * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
+ * is totally arbitrary; a setting of 2048 will get you at least 8 MB I/Os.
+ */
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+#define SG_MAX_SEGMENTS 2048
+#else
+#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#endif
+
+#ifdef CONFIG_SG_POOL
+void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+ struct scatterlist *first_chunk);
+#endif
+
+/*
* sg page iterator
*
* Iterates over sg entries page-by-page. On each successful iteration,
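
The chained helpers are aimed at callers that embed a small first chunk in a per-request structure and only fall back to pool allocations for larger transfers; a hedged sketch under that assumption (requires CONFIG_SG_POOL; struct foo_cmd and foo_map are invented):

#include <linux/scatterlist.h>

struct foo_cmd {
	struct sg_table table;
	/* preallocated inline chunk, reused for every request */
	struct scatterlist first_chunk[SG_CHUNK_SIZE];
};

static int foo_map(struct foo_cmd *cmd, int nents)
{
	int ret;

	ret = sg_alloc_table_chained(&cmd->table, nents, cmd->first_chunk);
	if (ret)
		return ret;

	/* ... fill and DMA-map cmd->table.sgl ... */

	sg_free_table_chained(&cmd->table, true);
	return 0;
}
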
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52c4847b05e2..6e42ada26345 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -40,7 +40,6 @@ struct sched_param {
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
-#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
@@ -178,9 +177,11 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(int active);
+extern void cpu_load_update_nohz_start(void);
+extern void cpu_load_update_nohz_stop(void);
#else
-static inline void update_cpu_load_nohz(int active) { }
+static inline void cpu_load_update_nohz_start(void) { }
+static inline void cpu_load_update_nohz_stop(void) { }
#endif
extern void dump_cpu_task(int cpu);
@@ -372,6 +373,15 @@ extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
+extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying NULL
+#endif
extern void sched_show_task(struct task_struct *p);
@@ -511,6 +521,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
+#define MMF_OOM_REAPED 21 /* mm has been already reaped */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
@@ -658,6 +669,7 @@ struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
+ atomic_t oom_victims; /* # of TIF_MEMDIE threads in this thread group */
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -782,7 +794,11 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
- oom_flags_t oom_flags;
+ /*
+ * Thread is the potential origin of an oom condition; kill first on
+ * oom
+ */
+ bool oom_flag_origin;
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
@@ -935,9 +951,19 @@ enum cpu_idle_type {
};
/*
+ * Integer metrics need fixed point arithmetic, e.g., sched/fair
+ * has a few: load, load_avg, util_avg, freq, and capacity.
+ *
+ * We define a basic fixed point arithmetic range, and then formalize
+ * all these metrics based on that basic range.
+ */
+# define SCHED_FIXEDPOINT_SHIFT 10
+# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
+
+/*
* Increase resolution of cpu_capacity calculations
*/
-#define SCHED_CAPACITY_SHIFT 10
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
@@ -1199,18 +1225,56 @@ struct load_weight {
};
/*
- * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors frequency scaling into the amount of time that a
- * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
- * aggregated such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency and cpu scaling into the amount of time
- * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
- * For cfs_rq, it is the aggregated such times of all runnable and
+ * The load_avg/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg() in kernel/sched/fair.c).
+ *
+ * [load_avg definition]
+ *
+ * load_avg = runnable% * scale_load_down(load)
+ *
+ * where runnable% is the time ratio that a sched_entity is runnable.
+ * For cfs_rq, it is the aggregated load_avg of all runnable and
* blocked sched_entities.
- * The 64 bit load_sum can:
- * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
- * the highest weight (=88761) always runnable, we should not overflow
- * 2) for entity, support any load.weight always runnable
+ *
+ * load_avg may also take frequency scaling into account:
+ *
+ * load_avg = runnable% * scale_load_down(load) * freq%
+ *
+ * where freq% is the CPU frequency normalized to the highest frequency.
+ *
+ * [util_avg definition]
+ *
+ * util_avg = running% * SCHED_CAPACITY_SCALE
+ *
+ * where running% is the time ratio that a sched_entity is running on
+ * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
+ * and blocked sched_entities.
+ *
+ * util_avg may also factor frequency scaling and CPU capacity scaling:
+ *
+ * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
+ *
+ * where freq% is the same as above, and capacity% is the CPU capacity
+ * normalized to the greatest capacity (due to uarch differences, etc).
+ *
+ * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
+ * themselves are in the range of [0, 1]. To do fixed point arithmetic,
+ * we therefore scale them to as large a range as necessary. This is for
+ * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ *
+ * [Overflow issue]
+ *
+ * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
+ * with the highest load (=88761), always runnable on a single cfs_rq,
+ * and should not overflow as the number already hits PID_MAX_LIMIT.
+ *
+ * For all other cases (including 32-bit kernels), struct load_weight's
+ * weight will overflow first before we do, because:
+ *
+ * Max(load_avg) <= Max(load.weight)
+ *
+ * Then it is the load_weight's responsibility to consider overflow
+ * issues.
*/
struct sched_avg {
u64 last_update_time, load_sum;
@@ -1475,6 +1539,7 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
+ unsigned sched_remote_wakeup:1;
unsigned :0; /* force alignment to the next boundary */
/* unserialized, strictly 'current' */
@@ -1596,6 +1661,7 @@ struct task_struct {
unsigned long sas_ss_sp;
size_t sas_ss_size;
+ unsigned sas_ss_flags;
struct callback_head *task_works;
@@ -1871,6 +1937,11 @@ extern int arch_task_struct_size __read_mostly;
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+ return p->nr_cpus_allowed;
+}
+
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
@@ -2184,6 +2255,7 @@ static inline void memalloc_noio_restore(unsigned int flags)
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
@@ -2207,6 +2279,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+TASK_PFA_SET(LMK_WAITING, lmk_waiting)
+
/*
* task->jobctl flags
*/
@@ -2303,8 +2378,6 @@ extern unsigned long long notrace sched_clock(void);
/*
* See the comment in kernel/sched/clock.c
*/
-extern u64 cpu_clock(int cpu);
-extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
@@ -2323,6 +2396,16 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
+
+static inline u64 cpu_clock(int cpu)
+{
+ return sched_clock();
+}
+
+static inline u64 local_clock(void)
+{
+ return sched_clock();
+}
#else
/*
* Architectures can set this to 1 if they have specified
@@ -2337,6 +2420,26 @@ extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+/*
+ * As outlined in clock.c, provides a fast, high resolution, nanosecond
+ * time source that is monotonic per cpu argument and has bounded drift
+ * between cpus.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !! #
+ * ####################################################################
+ */
+static inline u64 cpu_clock(int cpu)
+{
+ return sched_clock_cpu(cpu);
+}
+
+static inline u64 local_clock(void)
+{
+ return sched_clock_cpu(raw_smp_processor_id());
+}
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -2575,6 +2678,18 @@ static inline int kill_cad_pid(int sig, int priv)
*/
static inline int on_sig_stack(unsigned long sp)
{
+ /*
+ * If the signal stack is SS_AUTODISARM then, by construction, we
+ * can't be on the signal stack unless user code deliberately set
+ * SS_AUTODISARM when we were already on it.
+ *
+ * This improves reliability: if user state gets corrupted such that
+ * the stack pointer points very close to the end of the signal stack,
+ * then this check will enable the signal to be handled anyway.
+ */
+ if (current->sas_ss_flags & SS_AUTODISARM)
+ return 0;
+
#ifdef CONFIG_STACK_GROWSUP
return sp >= current->sas_ss_sp &&
sp - current->sas_ss_sp < current->sas_ss_size;
@@ -2592,6 +2707,13 @@ static inline int sas_ss_flags(unsigned long sp)
return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
+static inline void sas_ss_reset(struct task_struct *p)
+{
+ p->sas_ss_sp = 0;
+ p->sas_ss_size = 0;
+ p->sas_ss_flags = SS_DISABLE;
+}
+
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
@@ -2610,14 +2732,26 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
-static inline void mmdrop(struct mm_struct * mm)
+static inline void mmdrop(struct mm_struct *mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
+static inline bool mmget_not_zero(struct mm_struct *mm)
+{
+ return atomic_inc_not_zero(&mm->mm_users);
+}
+
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* Same as above, but performs the slow path from async context. Can
+ * be called from atomic context as well.
+ */
+extern void mmput_async(struct mm_struct *);
+#endif
+
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
@@ -2646,7 +2780,14 @@ static inline int copy_thread_tls(
}
#endif
extern void flush_thread(void);
-extern void exit_thread(void);
+
+#ifdef CONFIG_HAVE_EXIT_THREAD
+extern void exit_thread(struct task_struct *tsk);
+#else
+static inline void exit_thread(struct task_struct *tsk)
+{
+}
+#endif
extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
@@ -3240,7 +3381,10 @@ struct update_util_data {
u64 time, unsigned long util, unsigned long max);
};
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max));
+void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
#endif
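
Of the sched.h additions above, mmget_not_zero() is the one most likely to be picked up elsewhere; the hedged sketch below (foo_grab_mm is invented) shows the intended pattern of taking an mm_users reference only while the count is still non-zero:

#include <linux/sched.h>
#include <linux/mm_types.h>

static struct mm_struct *foo_grab_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm && !mmget_not_zero(mm))
		mm = NULL;	/* mm_users already dropped to zero */
	task_unlock(task);

	return mm;		/* caller must mmput() a non-NULL result */
}
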
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index a9414fd49dc6..de1f64318fc4 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,4 +705,73 @@ typedef struct sctp_auth_chunk {
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
+struct sctp_info {
+ __u32 sctpi_tag;
+ __u32 sctpi_state;
+ __u32 sctpi_rwnd;
+ __u16 sctpi_unackdata;
+ __u16 sctpi_penddata;
+ __u16 sctpi_instrms;
+ __u16 sctpi_outstrms;
+ __u32 sctpi_fragmentation_point;
+ __u32 sctpi_inqueue;
+ __u32 sctpi_outqueue;
+ __u32 sctpi_overall_error;
+ __u32 sctpi_max_burst;
+ __u32 sctpi_maxseg;
+ __u32 sctpi_peer_rwnd;
+ __u32 sctpi_peer_tag;
+ __u8 sctpi_peer_capable;
+ __u8 sctpi_peer_sack;
+ __u16 __reserved1;
+
+ /* assoc status info */
+ __u64 sctpi_isacks;
+ __u64 sctpi_osacks;
+ __u64 sctpi_opackets;
+ __u64 sctpi_ipackets;
+ __u64 sctpi_rtxchunks;
+ __u64 sctpi_outofseqtsns;
+ __u64 sctpi_idupchunks;
+ __u64 sctpi_gapcnt;
+ __u64 sctpi_ouodchunks;
+ __u64 sctpi_iuodchunks;
+ __u64 sctpi_oodchunks;
+ __u64 sctpi_iodchunks;
+ __u64 sctpi_octrlchunks;
+ __u64 sctpi_ictrlchunks;
+
+ /* primary transport info */
+ struct sockaddr_storage sctpi_p_address;
+ __s32 sctpi_p_state;
+ __u32 sctpi_p_cwnd;
+ __u32 sctpi_p_srtt;
+ __u32 sctpi_p_rto;
+ __u32 sctpi_p_hbinterval;
+ __u32 sctpi_p_pathmaxrxt;
+ __u32 sctpi_p_sackdelay;
+ __u32 sctpi_p_sackfreq;
+ __u32 sctpi_p_ssthresh;
+ __u32 sctpi_p_partial_bytes_acked;
+ __u32 sctpi_p_flight_size;
+ __u16 sctpi_p_error;
+ __u16 __reserved2;
+
+ /* sctp sock info */
+ __u32 sctpi_s_autoclose;
+ __u32 sctpi_s_adaptation_ind;
+ __u32 sctpi_s_pd_point;
+ __u8 sctpi_s_nodelay;
+ __u8 sctpi_s_disable_fragments;
+ __u8 sctpi_s_v4mapped;
+ __u8 sctpi_s_frag_interleave;
+ __u32 sctpi_s_type;
+ __u32 __reserved3;
+};
+
+struct sctp_infox {
+ struct sctp_info *sctpinfo;
+ struct sctp_association *asoc;
+};
+
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/security.h b/include/linux/security.h
index 157f0cb1e4d2..14df373ff2ca 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -71,7 +71,7 @@ struct timezone;
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, int audit);
-extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
+extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -208,7 +208,13 @@ int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
-int security_settime(const struct timespec *ts, const struct timezone *tz);
+int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
+static inline int security_settime(const struct timespec *ts, const struct timezone *tz)
+{
+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+ return security_settime64(&ts64, tz);
+}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
@@ -222,10 +228,10 @@ int security_sb_remount(struct super_block *sb, void *data);
int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
-int security_sb_mount(const char *dev_name, struct path *path,
+int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data);
int security_sb_umount(struct vfsmount *mnt, int flags);
-int security_sb_pivotroot(struct path *old_path, struct path *new_path);
+int security_sb_pivotroot(const struct path *old_path, const struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts,
unsigned long kern_flags,
@@ -462,10 +468,18 @@ static inline int security_syslog(int type)
return 0;
}
+static inline int security_settime64(const struct timespec64 *ts,
+ const struct timezone *tz)
+{
+ return cap_settime(ts, tz);
+}
+
static inline int security_settime(const struct timespec *ts,
const struct timezone *tz)
{
- return cap_settime(ts, tz);
+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+ return cap_settime(&ts64, tz);
}
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
@@ -530,7 +544,7 @@ static inline int security_sb_statfs(struct dentry *dentry)
return 0;
}
-static inline int security_sb_mount(const char *dev_name, struct path *path,
+static inline int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags,
void *data)
{
@@ -542,8 +556,8 @@ static inline int security_sb_umount(struct vfsmount *mnt, int flags)
return 0;
}
-static inline int security_sb_pivotroot(struct path *old_path,
- struct path *new_path)
+static inline int security_sb_pivotroot(const struct path *old_path,
+ const struct path *new_path)
{
return 0;
}
@@ -1442,83 +1456,83 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_SECURITY_PATH
-int security_path_unlink(struct path *dir, struct dentry *dentry);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
-int security_path_rmdir(struct path *dir, struct dentry *dentry);
-int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+int security_path_unlink(const struct path *dir, struct dentry *dentry);
+int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode);
+int security_path_rmdir(const struct path *dir, struct dentry *dentry);
+int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
-int security_path_truncate(struct path *path);
-int security_path_symlink(struct path *dir, struct dentry *dentry,
+int security_path_truncate(const struct path *path);
+int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name);
-int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry);
-int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry,
+int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry,
unsigned int flags);
-int security_path_chmod(struct path *path, umode_t mode);
-int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
-int security_path_chroot(struct path *path);
+int security_path_chmod(const struct path *path, umode_t mode);
+int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid);
+int security_path_chroot(const struct path *path);
#else /* CONFIG_SECURITY_PATH */
-static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
+static inline int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
return 0;
}
-static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
+static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
return 0;
}
-static inline int security_path_rmdir(struct path *dir, struct dentry *dentry)
+static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry)
{
return 0;
}
-static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
+static inline int security_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
return 0;
}
-static inline int security_path_truncate(struct path *path)
+static inline int security_path_truncate(const struct path *path)
{
return 0;
}
-static inline int security_path_symlink(struct path *dir, struct dentry *dentry,
+static inline int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
return 0;
}
static inline int security_path_link(struct dentry *old_dentry,
- struct path *new_dir,
+ const struct path *new_dir,
struct dentry *new_dentry)
{
return 0;
}
-static inline int security_path_rename(struct path *old_dir,
+static inline int security_path_rename(const struct path *old_dir,
struct dentry *old_dentry,
- struct path *new_dir,
+ const struct path *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return 0;
}
-static inline int security_path_chmod(struct path *path, umode_t mode)
+static inline int security_path_chmod(const struct path *path, umode_t mode)
{
return 0;
}
-static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
return 0;
}
-static inline int security_path_chroot(struct path *path)
+static inline int security_path_chroot(const struct path *path)
{
return 0;
}
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 85193aa8c1e3..8e4624efdb6f 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -24,10 +24,10 @@ extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
extern int console_blanked;
-extern unsigned char color_table[];
-extern int default_red[];
-extern int default_grn[];
-extern int default_blu[];
+extern const unsigned char color_table[];
+extern unsigned char default_red[];
+extern unsigned char default_grn[];
+extern unsigned char default_blu[];
extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
extern u16 screen_glyph(struct vc_data *vc, int offset);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e0582106ef4f..7973a821ac58 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,7 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- return lockless_dereference(s->sequence);
+ return lockless_dereference(s)->sequence;
}
/**
@@ -331,7 +331,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* unsigned seq, idx;
*
* do {
- * seq = lockless_dereference(latch->seq);
+ * seq = lockless_dereference(latch)->seq;
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 434879759725..48ec7651989b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -36,6 +36,7 @@ struct plat_serial8250_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ unsigned int (*get_mctrl)(struct uart_port *);
int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned old);
@@ -148,6 +149,7 @@ extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
+extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
extern int serial8250_do_startup(struct uart_port *port);
extern void serial8250_do_shutdown(struct uart_port *port);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index cbfcf38e220d..a3d7c0d4a03e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -123,6 +123,7 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
@@ -281,6 +282,8 @@ struct uart_state {
enum uart_pm_state pm_state;
struct circ_buf xmit;
+ atomic_t refcount;
+ wait_queue_head_t remove_wait;
struct uart_port *uart_port;
};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 92557bbce7e7..b63f63eaa39c 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -28,6 +28,21 @@ struct sigpending {
sigset_t signal;
};
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <linux/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+ /* _sigchld is currently the largest known union member */
+ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
/*
* Define some primitives to manipulate sigset_t.
*/
@@ -385,7 +400,9 @@ int unhandled_signal(struct task_struct *tsk, int sig);
#else
#define rt_sigmask(sig) sigmask(sig)
#endif
-#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
+
+#define siginmask(sig, mask) \
+ ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
#define SIG_KERNEL_ONLY_MASK (\
rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
@@ -406,14 +423,10 @@ int unhandled_signal(struct task_struct *tsk, int sig);
rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
-#define sig_kernel_only(sig) \
- (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
-#define sig_kernel_coredump(sig) \
- (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
-#define sig_kernel_ignore(sig) \
- (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
-#define sig_kernel_stop(sig) \
- (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
+#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK)
+#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK)
+#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
+#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
#define sig_user_defined(t, signr) \
(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
@@ -432,8 +445,10 @@ int __save_altstack(stack_t __user *, unsigned long);
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
- put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ if (t->sas_ss_flags & SS_AUTODISARM) \
+ sas_ss_reset(t); \
} while (0);
#ifdef CONFIG_PROC_FS
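
A hedged illustration of the consolidated siginmask(): the < SIGRTMIN guard now lives inside the macro, so the sig_kernel_*() helpers are safe to call with any signal number, including real-time ones (the function name is made up):

#include <linux/signal.h>

static bool foo_signal_is_fatal_only(int sig)
{
	/* true only for SIGKILL/SIGSTOP; false for every real-time signal */
	return sig_kernel_only(sig);
}
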
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 15d0df943466..ee38a4127475 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -382,14 +382,10 @@ enum {
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
-
- /* generate software timestamp on peer data acknowledgment */
- SKBTX_ACK_TSTAMP = 1 << 7,
};
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP | \
- SKBTX_ACK_TSTAMP)
+ SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
/*
@@ -465,23 +461,27 @@ enum {
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 3,
- SKB_GSO_TCPV6 = 1 << 4,
+ SKB_GSO_TCP_FIXEDID = 1 << 4,
+
+ SKB_GSO_TCPV6 = 1 << 5,
- SKB_GSO_FCOE = 1 << 5,
+ SKB_GSO_FCOE = 1 << 6,
- SKB_GSO_GRE = 1 << 6,
+ SKB_GSO_GRE = 1 << 7,
- SKB_GSO_GRE_CSUM = 1 << 7,
+ SKB_GSO_GRE_CSUM = 1 << 8,
- SKB_GSO_IPIP = 1 << 8,
+ SKB_GSO_IPXIP4 = 1 << 9,
- SKB_GSO_SIT = 1 << 9,
+ SKB_GSO_IPXIP6 = 1 << 10,
- SKB_GSO_UDP_TUNNEL = 1 << 10,
+ SKB_GSO_UDP_TUNNEL = 1 << 11,
- SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
- SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+ SKB_GSO_PARTIAL = 1 << 13,
+
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
};
#if BITS_PER_LONG > 32
@@ -1325,6 +1325,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
return dataref != 1;
}
+static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_header_cloned(skb))
+ return pskb_expand_head(skb, 0, 0, pri);
+
+ return 0;
+}
+
/**
* skb_header_release - release reference to header
* @skb: buffer to operate on
@@ -2457,7 +2467,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
static inline struct page *dev_alloc_pages(unsigned int order)
{
- return __dev_alloc_pages(GFP_ATOMIC, order);
+ return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}
/**
@@ -2475,7 +2485,7 @@ static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
static inline struct page *dev_alloc_page(void)
{
- return __dev_alloc_page(GFP_ATOMIC);
+ return dev_alloc_pages(0);
}
/**
@@ -2949,7 +2959,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
+static inline void skb_free_datagram_locked(struct sock *sk,
+ struct sk_buff *skb)
+{
+ __skb_free_datagram_locked(sk, skb, 0);
+}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
@@ -2977,6 +2992,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+ gfp_t gfp);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
@@ -3584,7 +3601,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
* Keeps track of level of encapsulation of network headers.
*/
struct skb_gso_cb {
- int mac_offset;
+ union {
+ int mac_offset;
+ int data_offset;
+ };
int encap_level;
__wsum csum;
__u16 csum_start;
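
skb_header_unclone() packages the common "make the header writable before editing it" step; a hedged sketch of a caller (foo_set_tos is invented, IP checksum fix-up omitted):

#include <linux/skbuff.h>
#include <linux/ip.h>

static int foo_set_tos(struct sk_buff *skb, u8 tos)
{
	int err;

	/* expand the head only if the header area is shared with a clone */
	err = skb_header_unclone(skb, GFP_ATOMIC);
	if (err)
		return err;

	ip_hdr(skb)->tos = tos;	/* checksum fix-up omitted in this sketch */
	return 0;
}
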
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 508bd827e6dc..aeb3e6d00a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);
/*
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment;
+ int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 9edbbf352340..8694f7a5d92b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -80,6 +80,10 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ void *random_seq;
+#endif
+
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 665cd0cd18b8..d1faa019c02a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,22 +111,6 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
}
#endif
-
-/**
- * virt_to_obj - returns address of the beginning of object.
- * @s: object's kmem_cache
- * @slab_page: address of slab page
- * @x: address within object memory range
- *
- * Returns address of the beginning of object
- */
-static inline void *virt_to_obj(struct kmem_cache *s,
- const void *slab_page,
- const void *x)
-{
- return (void *)x - ((x - slab_page) % s->size);
-}
-
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason);
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d0cb6d189a0a..cbb0f06c41b2 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -26,6 +26,8 @@ struct qcom_smd_device {
struct qcom_smd_channel *channel;
};
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
+
/**
* struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver
@@ -42,16 +44,71 @@ struct qcom_smd_driver {
int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev);
- int (*callback)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t callback;
};
+#if IS_ENABLED(CONFIG_QCOM_SMD)
+
int qcom_smd_driver_register(struct qcom_smd_driver *drv);
void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
+ const char *name,
+ qcom_smd_cb_t cb);
+void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
+void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+
+#else
+
+static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
+{
+ return -ENXIO;
+}
+
+static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline struct qcom_smd_channel *
+qcom_smd_open_channel(struct qcom_smd_channel *channel,
+ const char *name,
+ qcom_smd_cb_t cb)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline int qcom_smd_send(struct qcom_smd_channel *channel,
+ const void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
+#endif
+
#define module_qcom_smd_driver(__smd_driver) \
module_driver(__smd_driver, qcom_smd_driver_register, \
qcom_smd_driver_unregister)
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
#endif
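
A hedged sketch of a driver using the new qcom_smd_cb_t callback type and secondary-channel open (all names invented; the smd match table is omitted, and the error convention of qcom_smd_open_channel() is guessed defensively):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int foo_smd_callback(struct qcom_smd_channel *channel,
			    const void *data, size_t count)
{
	/* consume the inbound packet */
	return 0;
}

static int foo_smd_probe(struct qcom_smd_device *sdev)
{
	struct qcom_smd_channel *ctrl;

	/* secondary channels reuse the same callback type */
	ctrl = qcom_smd_open_channel(sdev->channel, "control", foo_smd_callback);
	if (IS_ERR_OR_NULL(ctrl))
		return -ENODEV;

	qcom_smd_set_drvdata(ctrl, sdev);
	return 0;
}

static struct qcom_smd_driver foo_smd_driver = {
	.probe = foo_smd_probe,
	.callback = foo_smd_callback,
	.driver = {
		.name = "foo_smd",
	},
};
module_qcom_smd_driver(foo_smd_driver);
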
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index f35e1512fcaa..7b88697929e9 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,12 +1,17 @@
#ifndef __QCOM_SMEM_STATE__
#define __QCOM_SMEM_STATE__
+#include <linux/errno.h>
+
+struct device_node;
struct qcom_smem_state;
struct qcom_smem_state_ops {
int (*update_bits)(void *, u32, u32);
};
+#ifdef CONFIG_QCOM_SMEM_STATE
+
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
@@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val
struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
void qcom_smem_state_unregister(struct qcom_smem_state *state);
+#else
+
+static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ const char *con_id, unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+}
+
+static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+ u32 mask, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+ const struct qcom_smem_state_ops *ops, void *data)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+}
+
+#endif
+
#endif
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
new file mode 100644
index 000000000000..92fc613ab23d
--- /dev/null
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+struct rcar_sysc_ch {
+ u16 chan_offs;
+ u8 chan_bit;
+ u8 isr_bit;
+};
+
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
+void __iomem *rcar_sysc_init(phys_addr_t base);
+
+#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 73bf6c6a833b..b5cc5a6d7011 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -201,8 +201,9 @@ struct ucred {
#define AF_NFC 39 /* NFC sockets */
#define AF_VSOCK 40 /* vSockets */
#define AF_KCM 41 /* Kernel Connection Multiplexor*/
+#define AF_QIPCRTR 42 /* Qualcomm IPC Router */
-#define AF_MAX 42 /* For now.. */
+#define AF_MAX 43 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -249,6 +250,7 @@ struct ucred {
#define PF_NFC AF_NFC
#define PF_VSOCK AF_VSOCK
#define PF_KCM AF_KCM
+#define PF_QIPCRTR AF_QIPCRTR
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 857a9a1d82b5..1f03483f61e5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -372,6 +372,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_message: undo any work done by prepare_message().
* @spi_flash_read: to support spi-controller hardwares that provide
* accelerated interface to read from flash devices.
+ * @flash_read_supported: spi device supports flash read
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -529,6 +530,7 @@ struct spi_master {
struct spi_message *message);
int (*spi_flash_read)(struct spi_device *spi,
struct spi_flash_read_message *msg);
+ bool (*flash_read_supported)(struct spi_device *spi);
/*
* These hooks are for drivers that use a generic implementation
@@ -1158,7 +1160,9 @@ struct spi_flash_read_message {
/* SPI core interface for flash read support */
static inline bool spi_flash_read_supported(struct spi_device *spi)
{
- return spi->master->spi_flash_read ? true : false;
+ return spi->master->spi_flash_read &&
+ (!spi->master->flash_read_supported ||
+ spi->master->flash_read_supported(spi));
}
int spi_flash_read(struct spi_device *spi,
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 1a79ed8e43da..8369d8a8cabd 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -50,6 +50,8 @@ struct stm_device;
* @sw_end: last STP master available to software
* @sw_nchannels: number of STP channels per master
* @sw_mmiosz: size of one channel's IO space, for mmap, optional
+ * @hw_override: masters in the STP stream will not match the ones
+ * assigned by software, but are up to the STM hardware
* @packet: callback that sends an STP packet
* @mmio_addr: mmap callback, optional
* @link: called when a new stm_source gets linked to us, optional
@@ -85,6 +87,7 @@ struct stm_data {
unsigned int sw_end;
unsigned int sw_nchannels;
unsigned int sw_mmiosz;
+ unsigned int hw_override;
ssize_t (*packet)(struct stm_data *, unsigned int,
unsigned int, unsigned int,
unsigned int, unsigned int,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index e6bc30a42a74..ffdaca9c01af 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -137,5 +137,7 @@ struct plat_stmmacenet_data {
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
struct stmmac_axi *axi;
+ int has_gmac4;
+ bool tso_en;
};
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d3993a79a325..26b6f6a66f83 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new);
extern void kfree_const(const void *x);
-extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index dabe643eb5fa..5ce9538f290e 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -3,6 +3,8 @@
#include <linux/types.h>
+struct file;
+
/* Descriptions of the types of units to
* print in */
enum string_size_units {
@@ -68,4 +70,8 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
+char *kstrdup_quotable(const char *src, gfp_t gfp);
+char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
+char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+
#endif
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
new file mode 100644
index 000000000000..451771d9b9c0
--- /dev/null
+++ b/include/linux/stringhash.h
@@ -0,0 +1,76 @@
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include <linux/compiler.h> /* For __pure */
+#include <linux/types.h> /* For u32, u64 */
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time. Example of use:
+ *
+ * unsigned long hash = init_name_hash;
+ * while (*p)
+ * hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash() 0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+ return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+ return (unsigned int)hash;
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different hash function much faster.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int __pure full_name_hash(const char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+extern u64 __pure hashlen_string(const char *name);
+
+#endif /* __LINUX_STRINGHASH_H */
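
A hedged example of the new helpers (foo_hash_name is invented): hash a NUL-terminated name with full_name_hash() and pack the result with its length using the hashlen macros, which is equivalent in spirit to a single hashlen_string() call:

#include <linux/string.h>
#include <linux/stringhash.h>

static u64 foo_hash_name(const char *name)
{
	unsigned int len = strlen(name);
	unsigned int hash = full_name_hash(name, len);

	return hashlen_create(hash, len);	/* low 32 bits: hash, high: len */
}
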
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 6a241a277249..899791573a40 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -127,7 +127,7 @@ struct rpc_authops {
void (*destroy)(struct rpc_auth *);
struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
- struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int);
+ struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
int (*list_pseudoflavors)(rpc_authflavor_t *, int);
rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
int (*flavor2info)(rpc_authflavor_t,
@@ -167,6 +167,7 @@ void rpc_destroy_authunix(void);
struct rpc_cred * rpc_lookup_cred(void);
struct rpc_cred * rpc_lookup_cred_nonblock(void);
+struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
@@ -178,7 +179,7 @@ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
int rpcauth_get_gssinfo(rpc_authflavor_t,
struct rpcsec_gss_info *);
int rpcauth_list_flavors(rpc_authflavor_t *, int);
-struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
+struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
@@ -201,9 +202,28 @@ char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
struct rpc_cred * get_rpccred(struct rpc_cred *cred)
{
- atomic_inc(&cred->cr_count);
+ if (cred != NULL)
+ atomic_inc(&cred->cr_count);
return cred;
}
+/**
+ * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer
+ * @cred: cred of which to take a reference
+ *
+ * In some cases, we may have a pointer to a credential to which we
+ * want to take a reference, but don't already have one. Because these
+ * objects are freed using RCU, we can access the cr_count while its
+ * on its way to destruction and only take a reference if it's not already
+ * zero.
+ */
+static inline struct rpc_cred *
+get_rpccred_rcu(struct rpc_cred *cred)
+{
+ if (atomic_inc_not_zero(&cred->cr_count))
+ return cred;
+ return NULL;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_H */
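A sketch of the intended use of get_rpccred_rcu() (not from the patch; struct example_holder and its field are hypothetical): take a counted reference to a credential reached through an RCU-protected pointer, and back off cleanly if it is already on its way to destruction.

#include <linux/rcupdate.h>
#include <linux/sunrpc/auth.h>

struct example_holder {
        struct rpc_cred __rcu *cred;    /* hypothetical RCU-managed pointer */
};

static struct rpc_cred *example_get_cred(struct example_holder *h)
{
        struct rpc_cred *cred;

        rcu_read_lock();
        cred = rcu_dereference(h->cred);
        if (cred)
                cred = get_rpccred_rcu(cred);   /* NULL if cr_count already dropped to zero */
        rcu_read_unlock();

        return cred;    /* a non-NULL result is balanced by put_rpccred() */
}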
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 9a7ddbaf116e..19c659d1c0f8 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -176,6 +176,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
+size_t rpc_max_bc_payload(struct rpc_clnt *);
unsigned long rpc_get_timeout(struct rpc_clnt *clnt);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 807371357160..59cbf16eaeb5 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -158,9 +158,9 @@ typedef __be32 rpc_fraghdr;
/*
* Note that RFC 1833 does not put any size restrictions on the
- * netid string, but all currently defined netid's fit in 4 bytes.
+ * netid string, but all currently defined netid's fit in 5 bytes.
*/
-#define RPCBIND_MAXNETIDLEN (4u)
+#define RPCBIND_MAXNETIDLEN (5u)
/*
* Universal addresses are introduced in RFC 1833 and further spelled
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3081339968c3..d6917b896d3a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
enum rpcrdma_errcode, __be32 *);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c00f53a4ccdd..91d5a5d6f52b 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,6 +16,7 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/hash.h>
+#include <linux/stringhash.h>
#include <linux/cred.h>
struct svc_cred {
@@ -165,41 +166,18 @@ extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
-static inline unsigned long hash_str(char *name, int bits)
+/*
+ * The <stringhash.h> functions are good enough that we don't need to
+ * use hash_32() on them; just extracting the high bits is enough.
+ */
+static inline unsigned long hash_str(char const *name, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
- do {
- if (unlikely(!(c = *name++))) {
- c = (char)len; len = -1;
- }
- l = (l << 8) | c;
- len++;
- if ((len & (BITS_PER_LONG/8-1))==0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
- return hash >> (BITS_PER_LONG - bits);
+ return hashlen_hash(hashlen_string(name)) >> (32 - bits);
}
-static inline unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char const *buf, int length, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
- do {
- if (len == length) {
- c = (char)len; len = -1;
- } else
- c = *buf++;
- l = (l << 8) | c;
- len++;
- if ((len & (BITS_PER_LONG/8-1))==0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
- return hash >> (BITS_PER_LONG - bits);
+ return full_name_hash(buf, length) >> (32 - bits);
}
#endif /* __KERNEL__ */
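For illustration only (EXAMPLE_CACHE_BITS and both functions are hypothetical): the rewritten helpers are used exactly like the old ones, picking a hash chain from a name or from a binary blob.

#define EXAMPLE_CACHE_BITS 7    /* 128 buckets, illustrative */

static unsigned long example_client_bucket(const char *name)
{
        return hash_str(name, EXAMPLE_CACHE_BITS);
}

static unsigned long example_blob_bucket(const char *blob, int len)
{
        return hash_mem(blob, len, EXAMPLE_CACHE_BITS);
}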
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fb0d212e0d3a..5aa3834619a8 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -142,6 +142,7 @@ struct rpc_xprt_ops {
int (*bc_setup)(struct rpc_xprt *xprt,
unsigned int min_reqs);
int (*bc_up)(struct svc_serv *serv, struct net *net);
+ size_t (*bc_maxpayload)(struct rpc_xprt *xprt);
void (*bc_free_rqst)(struct rpc_rqst *rqst);
void (*bc_destroy)(struct rpc_xprt *xprt,
unsigned int max_reqs);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 767190b01363..39267dc3486a 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -52,7 +52,9 @@
#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
-#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
+#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
+#define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */
+#define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */
/* Memory registration strategies, by number.
* This is part of a kernel / user space API. Do not remove. */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2b83359c19ca..0af2bb2028fd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -316,6 +316,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
@@ -418,7 +419,7 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -513,8 +514,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page) \
- (!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+ (page_trans_huge_mapcount(page, total_mapcount) == 1)
static inline int try_to_free_swap(struct page *page)
{
@@ -533,6 +534,10 @@ static inline swp_entry_t get_swap_page(void)
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
+ /* Cgroup2 doesn't have per-cgroup swappiness */
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return vm_swappiness;
+
/* root ? */
if (mem_cgroup_disabled() || !memcg->css.parent)
return vm_swappiness;
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
new file mode 100644
index 000000000000..c6ffe8b0725c
--- /dev/null
+++ b/include/linux/sync_file.h
@@ -0,0 +1,57 @@
+/*
+ * include/linux/sync_file.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_FILE_H
+#define _LINUX_SYNC_FILE_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+struct sync_file_cb {
+ struct fence_cb cb;
+ struct fence *fence;
+ struct sync_file *sync_file;
+};
+
+/**
+ * struct sync_file - sync file to export to the userspace
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_file. Useful for debugging
+ * @sync_file_list: membership in global file list
+ * @num_fences: number of sync_pts in the fence
+ * @wq: wait queue for fence signaling
+ * @status: 0: signaled, >0:active, <0: error
+ * @cbs: sync_pts callback information
+ */
+struct sync_file {
+ struct file *file;
+ struct kref kref;
+ char name[32];
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sync_file_list;
+#endif
+ int num_fences;
+
+ wait_queue_head_t wq;
+ atomic_t status;
+
+ struct sync_file_cb cbs[];
+};
+
+struct sync_file *sync_file_create(struct fence *fence);
+
+#endif /* _LINUX_SYNC_FILE_H */
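A sketch of how a driver might hand a fence to userspace through the new sync_file API (not from the patch; the function name is illustrative and error handling is reduced to the essentials):

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fence.h>
#include <linux/sync_file.h>

static int example_export_fence(struct fence *fence)
{
        struct sync_file *sync_file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        sync_file = sync_file_create(fence);
        if (!sync_file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, sync_file->file);
        return fd;              /* userspace polls/waits on this fd */
}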
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d795472c54d8..d02239022bd0 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -371,10 +371,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
size_t sigsetsize);
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t __user *uinfo);
-asmlinkage long sys_kill(int pid, int sig);
-asmlinkage long sys_tgkill(int tgid, int pid, int sig);
-asmlinkage long sys_tkill(int pid, int sig);
-asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo);
+asmlinkage long sys_kill(pid_t pid, int sig);
+asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig);
+asmlinkage long sys_tkill(pid_t pid, int sig);
+asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
asmlinkage long sys_sgetmask(void);
asmlinkage long sys_ssetmask(int newmask);
asmlinkage long sys_signal(int sig, __sighandler_t handler);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1b8a5a7876ce..e45abe7db9a6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -340,6 +340,7 @@ struct thermal_zone_of_device_ops {
int (*get_temp)(void *, int *);
int (*get_trend)(void *, long *);
int (*set_emul_temp)(void *, int);
+ int (*set_trip_temp)(void *, int, int);
};
/**
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 367d5af899e8..7e5d2fa9ac46 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
-# define timespec64_add_safe timespec_add_safe
# define timespec64_add timespec_add
# define timespec64_sub timespec_sub
# define timespec64_valid timespec_valid
@@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
-/*
- * timespec64_add_safe assumes both values are positive and checks for
- * overflow. It will return TIME_T_MAX if the returned value would be
- * smaller then either of the arguments.
- */
-extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
- const struct timespec64 rhs);
-
-
static inline struct timespec64 timespec64_add(struct timespec64 lhs,
struct timespec64 rhs)
{
@@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
#endif
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME64_MAX in case of overflow.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+ const struct timespec64 rhs);
+
#endif /* _LINUX_TIME64_H */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 96f37bee3bc1..816b7543f81b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
+#include <asm-generic/errno-base.h>
+
/* Included from linux/ktime.h */
void timekeeping_init(void);
@@ -11,8 +13,22 @@ extern int timekeeping_suspended;
*/
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday64(const struct timespec64 *ts);
-extern int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz);
+extern int do_sys_settimeofday64(const struct timespec64 *tv,
+ const struct timezone *tz);
+static inline int do_sys_settimeofday(const struct timespec *tv,
+ const struct timezone *tz)
+{
+ struct timespec64 ts64;
+
+ if (!tv)
+ return do_sys_settimeofday64(NULL, tz);
+
+ if (!timespec_valid(tv))
+ return -EINVAL;
+
+ ts64 = timespec_to_timespec64(*tv);
+ return do_sys_settimeofday64(&ts64, tz);
+}
/*
* Kernel time accessors
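Callers that already hold a timespec64 can go straight to the new 64-bit interface; the sketch below (not part of the patch, names and values illustrative) sets the wall clock and passes a NULL tz, which is expected to leave the timezone untouched.

#include <linux/ktime.h>

static int example_set_wall_clock(time64_t sec)
{
        struct timespec64 ts = { .tv_sec = sec, .tv_nsec = 0 };

        return do_sys_settimeofday64(&ts, NULL);
}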
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 61aa61dc410c..20ac746f3eb3 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -145,6 +145,8 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define setup_timer(timer, fn, data) \
__setup_timer((timer), (fn), (data), 0)
+#define setup_deferrable_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
#define setup_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), 0)
#define setup_deferrable_timer_on_stack(timer, fn, data) \
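A sketch of the new helper with illustrative names (not from the patch): a deferrable timer fires together with the next non-deferrable wakeup instead of pulling an idle CPU out of sleep, so it suits periodic housekeeping. The callback signature matches the timer API of this kernel series.

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
        /* periodic housekeeping that may be delayed past the timeout */
}

static void example_timer_start(void)
{
        setup_deferrable_timer(&example_timer, example_timer_fn, 0);
        mod_timer(&example_timer, jiffies + 10 * HZ);
}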
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0810f81b6db2..be007610ceb0 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -154,21 +154,6 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
-struct ring_buffer_event *
-trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
- int type, unsigned long len,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct trace_array *tr,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs);
-void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event);
void tracing_record_cmdline(struct task_struct *tsk);
@@ -229,7 +214,6 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_WAS_ENABLED_BIT,
- TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
@@ -244,7 +228,6 @@ enum {
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
@@ -255,7 +238,6 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
- TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -407,16 +389,12 @@ enum event_trigger_type {
ETT_SNAPSHOT = (1 << 1),
ETT_STACKTRACE = (1 << 2),
ETT_EVENT_ENABLE = (1 << 3),
+ ETT_EVENT_HIST = (1 << 4),
+ ETT_HIST_ENABLE = (1 << 5),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct trace_event_file *file, void *rec,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event);
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
@@ -450,100 +428,6 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
return false;
}
-/*
- * Helper function for event_trigger_unlock_commit{_regs}().
- * If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
- * entry already holds the field information of the current event.
- *
- * It also checks if the event should be discarded or not.
- * It is to be discarded if the event is soft disabled and the
- * event was only recorded to process triggers, or if the event
- * filter is active and this event did not match the filters.
- *
- * Returns true if the event is discarded, false otherwise.
- */
-static inline bool
-__event_trigger_test_discard(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry,
- enum event_trigger_type *tt)
-{
- unsigned long eflags = file->flags;
-
- if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- *tt = event_triggers_call(file, entry);
-
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
- ring_buffer_discard_commit(buffer, event);
- else if (!filter_check_discard(file, entry, buffer, event))
- return false;
-
- return true;
-}
-
-/**
- * event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- */
-static inline void
-event_trigger_unlock_commit(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
-
- if (tt)
- event_triggers_post_call(file, tt, entry);
-}
-
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc,
- struct pt_regs *regs)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(file->tr, buffer, event,
- irq_flags, pc, regs);
-
- if (tt)
- event_triggers_post_call(file, tt, entry);
-}
-
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
@@ -569,6 +453,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type,
int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
+extern int trace_event_get_offsets(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -605,15 +490,20 @@ extern void perf_trace_del(struct perf_event *event, int flags);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
-extern void *perf_trace_buf_prepare(int size, unsigned short type,
- struct pt_regs **regs, int *rctxp);
+void perf_trace_buf_update(void *record, u16 type);
+void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+ struct trace_event_call *call, u64 count,
+ struct pt_regs *regs, struct hlist_head *head,
+ struct task_struct *task);
static inline void
-perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
struct task_struct *task)
{
- perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b09f235db66..40144f382516 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -228,7 +228,8 @@ struct tty_port {
int count; /* Usage count */
wait_queue_head_t open_wait; /* Open waiters */
wait_queue_head_t delta_msr_wait; /* Modem status change */
- unsigned long flags; /* TTY flags ASY_*/
+ unsigned long flags; /* User TTY flags ASYNC_ */
+ unsigned long iflags; /* Internal flags TTY_PORT_ */
unsigned char console:1, /* port is a console */
low_latency:1; /* optional: tune for latency */
struct mutex mutex; /* Locking */
@@ -242,6 +243,18 @@ struct tty_port {
struct kref kref; /* Ref counter */
};
+/* tty_port::iflags bits -- use atomic bit ops */
+#define TTY_PORT_INITIALIZED 0 /* device is initialized */
+#define TTY_PORT_SUSPENDED 1 /* device is suspended */
+#define TTY_PORT_ACTIVE 2 /* device is open */
+
+/*
+ * uart drivers: use the uart_port::status field and the UPSTAT_* defines
+ * for s/w-based flow control steering and carrier detection status
+ */
+#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
+#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
+
/*
* Where all of the state associated with a tty is kept while the tty
* is open. Since the termios state should be kept even if the tty
@@ -338,7 +351,6 @@ struct tty_file_private {
#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@@ -360,6 +372,16 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val)
smp_mb();
}
+static inline bool tty_io_error(struct tty_struct *tty)
+{
+ return test_bit(TTY_IO_ERROR, &tty->flags);
+}
+
+static inline bool tty_throttled(struct tty_struct *tty)
+{
+ return test_bit(TTY_THROTTLED, &tty->flags);
+}
+
#ifdef CONFIG_TTY
extern void console_init(void);
extern void tty_kref_put(struct tty_struct *tty);
@@ -371,6 +393,7 @@ extern void proc_clear_tty(struct task_struct *p);
extern struct tty_struct *get_current_tty(void);
/* tty_io.c */
extern int __init tty_init(void);
+extern const char *tty_name(const struct tty_struct *tty);
#else
static inline void console_init(void)
{ }
@@ -391,6 +414,8 @@ static inline struct tty_struct *get_current_tty(void)
/* tty_io.c */
static inline int __init tty_init(void)
{ return 0; }
+static inline const char *tty_name(const struct tty_struct *tty)
+{ return "(none)"; }
#endif
extern struct ktermios tty_std_termios;
@@ -415,7 +440,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
return tty;
}
-extern const char *tty_name(const struct tty_struct *tty);
extern const char *tty_driver_name(const struct tty_struct *tty);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
extern int __tty_check_change(struct tty_struct *tty, int sig);
@@ -457,6 +481,7 @@ extern void tty_buffer_init(struct tty_port *port);
extern void tty_buffer_set_lock_subclass(struct tty_port *port);
extern bool tty_buffer_restart_work(struct tty_port *port);
extern bool tty_buffer_cancel_work(struct tty_port *port);
+extern void tty_buffer_flush_work(struct tty_port *port);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
extern void tty_termios_encode_baud_rate(struct ktermios *termios,
@@ -537,7 +562,67 @@ static inline struct tty_port *tty_port_get(struct tty_port *port)
/* If the cts flow control is enabled, return true. */
static inline bool tty_port_cts_enabled(struct tty_port *port)
{
- return port->flags & ASYNC_CTS_FLOW;
+ return test_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+}
+
+static inline void tty_port_set_cts_flow(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+ else
+ clear_bit(TTY_PORT_CTS_FLOW, &port->iflags);
+}
+
+static inline bool tty_port_active(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_ACTIVE, &port->iflags);
+}
+
+static inline void tty_port_set_active(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_ACTIVE, &port->iflags);
+ else
+ clear_bit(TTY_PORT_ACTIVE, &port->iflags);
+}
+
+static inline bool tty_port_check_carrier(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_CHECK_CD, &port->iflags);
+}
+
+static inline void tty_port_set_check_carrier(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_CHECK_CD, &port->iflags);
+ else
+ clear_bit(TTY_PORT_CHECK_CD, &port->iflags);
+}
+
+static inline bool tty_port_suspended(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_SUSPENDED, &port->iflags);
+}
+
+static inline void tty_port_set_suspended(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_SUSPENDED, &port->iflags);
+ else
+ clear_bit(TTY_PORT_SUSPENDED, &port->iflags);
+}
+
+static inline bool tty_port_initialized(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
+}
+
+static inline void tty_port_set_initialized(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_INITIALIZED, &port->iflags);
+ else
+ clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
}
extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
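A sketch of what a (non-UART) tty driver might do once it moves from poking ASYNC_* bits in port->flags to the new iflags accessors; the helper names are hypothetical and the termios macros are the existing C_CRTSCTS()/C_CLOCAL() helpers.

static void example_update_port_flags(struct tty_port *port,
                                      struct tty_struct *tty)
{
        tty_port_set_cts_flow(port, C_CRTSCTS(tty));
        tty_port_set_check_carrier(port, !C_CLOCAL(tty));
}

static bool example_port_is_usable(struct tty_port *port)
{
        return tty_port_active(port) && !tty_port_suspended(port);
}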
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
* defined; unless noted otherwise, they are optional, and can be
* filled in with a null pointer.
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
*
* Return the tty device corresponding to idx, NULL if there is not
* one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
- struct inode *inode, int idx);
+ struct file *filp, int idx);
int (*install)(struct tty_driver *driver, struct tty_struct *tty);
void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/linux/types.h b/include/linux/types.h
index 70dd3dfde631..baf718324f4a 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -156,7 +156,6 @@ typedef u32 dma_addr_t;
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
-typedef unsigned __bitwise__ oom_flags_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index df89c9bcba7d..d3a2bb712af3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif
}
+static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ raw_write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ raw_write_seqcount_end(&syncp->seq);
+#endif
+}
+
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
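The _raw variants map to raw_write_seqcount_{begin,end}(), i.e. they skip the lockdep annotations of the plain helpers, so the caller must already guarantee that writers cannot race. A minimal sketch with a hypothetical counter structure:

#include <linux/u64_stats_sync.h>

struct example_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void example_stats_add(struct example_stats *s, unsigned int len)
{
        u64_stats_update_begin_raw(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end_raw(&s->syncp);
}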
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 87c094961bd5..d1fd8cd39478 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -71,6 +71,14 @@ struct udp_sock {
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
+
+ /* GRO functions for UDP socket */
+ struct sk_buff ** (*gro_receive)(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sock *sk,
+ struct sk_buff *skb,
+ int nhoff);
};
static inline struct udp_sock *udp_sk(const struct sock *sk)
@@ -98,11 +106,11 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
-#define udp_portaddr_for_each_entry(__sk, node, list) \
- hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry(__sk, list) \
+ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
-#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \
- hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_rcu(__sk, list) \
+ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index fd9bcfedad42..1b5d1cd796e2 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 6a9a0c28415d..eba1f10e8cfd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -374,13 +374,12 @@ struct usb_bus {
int devnum_next; /* Next open device number in
* round-robin allocation */
+ struct mutex devnum_next_mutex; /* devnum_next mutex */
struct usb_devmap devmap; /* device address allocation map */
struct usb_device *root_hub; /* Root hub */
struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
- struct mutex usb_address0_mutex; /* unaddressed device mutex */
-
int bandwidth_allocated; /* on this bus: how much of the time
* reserved for periodic (intr/iso)
* requests is used, on average?
@@ -720,7 +719,7 @@ extern void usb_enable_ltm(struct usb_device *udev);
static inline bool usb_device_supports_ltm(struct usb_device *udev)
{
- if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
+ if (udev->speed < USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
return false;
return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
}
@@ -1069,7 +1068,7 @@ struct usbdrv_wrap {
* for interfaces bound to this driver.
* @soft_unbind: if set to 1, the USB core will not kill URBs and disable
* endpoints before calling the driver's disconnect method.
- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
+ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
* to initiate lower power link state transitions when an idle timeout
* occurs. Device-initiated USB 3.0 link PM will still be allowed.
*
@@ -1569,7 +1568,7 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* Initializes a interrupt urb with the proper information needed to submit
* it to a device.
*
- * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic
+ * Note that High Speed and SuperSpeed(+) interrupt endpoints use a logarithmic
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
* millisecond).
@@ -1595,7 +1594,7 @@ static inline void usb_fill_int_urb(struct urb *urb,
urb->complete = complete_fn;
urb->context = context;
- if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
+ if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) {
/* make sure interval is within allowed range */
interval = clamp(interval, 1, 16);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 5d4e151c49bf..457651bf45b0 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1223,9 +1223,13 @@ int usb_otg_descriptor_init(struct usb_gadget *gadget,
/* utility to simplify map/unmap of usb_requests to/from DMA */
+extern int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in);
extern int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in);
+extern void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in);
extern void usb_gadget_unmap_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index b98f831dcda3..66fc13705ab7 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -181,6 +181,7 @@ struct usb_hcd {
* bandwidth_mutex should be dropped after a successful control message
* to the device, or resetting the bandwidth after a failed attempt.
*/
+ struct mutex *address0_mutex;
struct mutex *bandwidth_mutex;
struct usb_hcd *shared_hcd;
struct usb_hcd *primary_hcd;
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index 24198e16f849..7a0350535cb1 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -72,37 +72,113 @@ enum otg_fsm_timer {
NUM_OTG_FSM_TIMERS,
};
-/* OTG state machine according to the OTG spec */
+/**
+ * struct otg_fsm - OTG state machine according to the OTG spec
+ *
+ * OTG hardware Inputs
+ *
+ * Common inputs for A and B device
+ * @id: TRUE for B-device, FALSE for A-device.
+ * @adp_change: TRUE when current ADP measurement (n) value, compared to the
+ * ADP measurement taken at n-2, differs by more than CADP_THR
+ * @power_up: TRUE when the OTG device first powers up its USB system and
+ * ADP measurement taken if ADP capable
+ *
+ * A-Device state inputs
+ * @a_srp_det: TRUE if the A-device detects SRP
+ * @a_vbus_vld: TRUE when VBUS voltage is in regulation
+ * @b_conn: TRUE if the A-device detects connection from the B-device
+ * @a_bus_resume: TRUE when the B-device detects that the A-device is signaling
+ * a resume (K state)
+ * B-Device state inputs
+ * @a_bus_suspend: TRUE when the B-device detects that the A-device has put the
+ * bus into suspend
+ * @a_conn: TRUE if the B-device detects a connection from the A-device
+ * @b_se0_srp: TRUE when the line has been at SE0 for more than the minimum
+ * time before generating SRP
+ * @b_ssend_srp: TRUE when the VBUS has been below VOTG_SESS_VLD for more than
+ * the minimum time before generating SRP
+ * @b_sess_vld: TRUE when the B-device detects that the voltage on VBUS is
+ * above VOTG_SESS_VLD
+ * @test_device: TRUE when the B-device switches to B-Host and detects an OTG
+ * test device. This must be set by host/hub driver
+ *
+ * Application inputs (A-Device)
+ * @a_bus_drop: TRUE when A-device application needs to power down the bus
+ * @a_bus_req: TRUE when A-device application wants to use the bus.
+ * FALSE to suspend the bus
+ *
+ * Application inputs (B-Device)
+ * @b_bus_req: TRUE during the time that the Application running on the
+ * B-device wants to use the bus
+ *
+ * Auxiliary inputs (OTG v1.3 only. Obsolete now.)
+ * @a_sess_vld: TRUE if the A-device detects that VBUS is above VA_SESS_VLD
+ * @b_bus_suspend: TRUE when the A-device detects that the B-device has put
+ * the bus into suspend
+ * @b_bus_resume: TRUE when the A-device detects that the B-device is signaling
+ * resume on the bus
+ *
+ * OTG Output status. Read only for users. Updated by OTG FSM helpers defined
+ * in this file
+ *
+ * Outputs for Both A and B device
+ * @drv_vbus: TRUE when A-device is driving VBUS
+ * @loc_conn: TRUE when the local device has signaled that it is connected
+ * to the bus
+ * @loc_sof: TRUE when the local device is generating activity on the bus
+ * @adp_prb: TRUE when the local device is in the process of doing
+ * ADP probing
+ *
+ * Outputs for B-device state
+ * @adp_sns: TRUE when the B-device is in the process of carrying out
+ * ADP sensing
+ * @data_pulse: TRUE when the B-device is performing data line pulsing
+ *
+ * Internal Variables
+ *
+ * a_set_b_hnp_en: TRUE when the A-device has successfully set the
+ * b_hnp_enable bit in the B-device.
+ * Unused as OTG fsm uses otg->host->b_hnp_enable instead
+ * b_srp_done: TRUE when the B-device has completed initiating SRP
+ * b_hnp_enable: TRUE when the B-device has accepted the
+ * SetFeature(b_hnp_enable) B-device.
+ * Unused as OTG fsm uses otg->gadget->b_hnp_enable instead
+ * a_clr_err: Asserted (by application ?) to clear a_vbus_err due to an
+ * overcurrent condition and causes the A-device to transition
+ * to a_wait_vfall
+ */
struct otg_fsm {
/* Input */
int id;
int adp_change;
int power_up;
- int test_device;
- int a_bus_drop;
- int a_bus_req;
int a_srp_det;
int a_vbus_vld;
int b_conn;
int a_bus_resume;
int a_bus_suspend;
int a_conn;
- int b_bus_req;
int b_se0_srp;
int b_ssend_srp;
int b_sess_vld;
+ int test_device;
+ int a_bus_drop;
+ int a_bus_req;
+ int b_bus_req;
+
/* Auxilary inputs */
int a_sess_vld;
int b_bus_resume;
int b_bus_suspend;
/* Output */
- int data_pulse;
int drv_vbus;
int loc_conn;
int loc_sof;
int adp_prb;
int adp_sns;
+ int data_pulse;
/* Internal variables */
int a_set_b_hnp_en;
@@ -110,7 +186,7 @@ struct otg_fsm {
int b_hnp_enable;
int a_clr_err;
- /* Informative variables */
+ /* Informative variables. All unused as of now */
int a_bus_drop_inf;
int a_bus_req_inf;
int a_clr_err_inf;
@@ -134,6 +210,7 @@ struct otg_fsm {
struct mutex lock;
u8 *host_req_flag;
struct delayed_work hnp_polling_work;
+ bool state_changed;
};
struct otg_fsm_ops {
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 6df2509033d7..2d095fc60204 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -1,7 +1,7 @@
/*
* UUID/GUID definition
*
- * Copyright (C) 2010, Intel Corp.
+ * Copyright (C) 2010, 2016 Intel Corp.
* Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
@@ -12,16 +12,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_UUID_H_
#define _LINUX_UUID_H_
#include <uapi/linux/uuid.h>
+/*
+ * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
+ * not including trailing NUL.
+ */
+#define UUID_STRING_LEN 36
static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
{
@@ -33,7 +34,17 @@ static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
return memcmp(&u1, &u2, sizeof(uuid_be));
}
+void generate_random_uuid(unsigned char uuid[16]);
+
extern void uuid_le_gen(uuid_le *u);
extern void uuid_be_gen(uuid_be *u);
+bool __must_check uuid_is_valid(const char *uuid);
+
+extern const u8 uuid_le_index[16];
+extern const u8 uuid_be_index[16];
+
+int uuid_le_to_bin(const char *uuid, uuid_le *u);
+int uuid_be_to_bin(const char *uuid, uuid_be *u);
+
#endif
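A sketch of the new string helpers (function name illustrative): check a textual UUID and convert it to its binary little-endian form.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uuid.h>

static int example_parse_uuid(const char *str, uuid_le *out)
{
        if (strlen(str) != UUID_STRING_LEN || !uuid_is_valid(str))
                return -EINVAL;

        return uuid_le_to_bin(str, out);        /* 0 on success */
}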
diff --git a/include/linux/verification.h b/include/linux/verification.h
new file mode 100644
index 000000000000..a10549a6c7cd
--- /dev/null
+++ b/include/linux/verification.h
@@ -0,0 +1,49 @@
+/* Signature verification
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_VERIFICATION_H
+#define _LINUX_VERIFICATION_H
+
+/*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+ VERIFYING_MODULE_SIGNATURE,
+ VERIFYING_FIRMWARE_SIGNATURE,
+ VERIFYING_KEXEC_PE_SIGNATURE,
+ VERIFYING_KEY_SIGNATURE,
+ VERIFYING_KEY_SELF_SIGNATURE,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+
+struct key;
+
+extern int verify_pkcs7_signature(const void *data, size_t len,
+ const void *raw_pkcs7, size_t pkcs7_len,
+ struct key *trusted_keys,
+ enum key_being_used_for usage,
+ int (*view_content)(void *ctx,
+ const void *data, size_t len,
+ size_t asn1hdrlen),
+ void *ctx);
+
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
+ struct key *trusted_keys,
+ enum key_being_used_for usage);
+#endif
+
+#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+#endif /* _LINUX_VERIFICATION_H */
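How a caller is expected to use the consolidated interface, as a sketch (not from the patch; function name illustrative): in this series a NULL keyring is understood to fall back to the builtin trusted keys, and no content viewer is needed for a detached signature.

#include <linux/errno.h>
#include <linux/verification.h>

static int example_verify_blob(const void *data, size_t len,
                               const void *pkcs7, size_t pkcs7_len)
{
        /* NULL keyring: use the builtin trusted keys */
        return verify_pkcs7_signature(data, len, pkcs7, pkcs7_len,
                                      NULL, VERIFYING_UNSPECIFIED_SIGNATURE,
                                      NULL, NULL);
}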
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
deleted file mode 100644
index da2049b5161c..000000000000
--- a/include/linux/verify_pefile.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Signed PE file verification
- *
- * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _LINUX_VERIFY_PEFILE_H
-#define _LINUX_VERIFY_PEFILE_H
-
-#include <crypto/public_key.h>
-
-extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
- struct key *trusted_keyring,
- enum key_being_used_for usage,
- bool *_trusted);
-
-#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d1f1d338af20..3d9d786a943c 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,12 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
+struct notifier_block; /* in notifier.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
@@ -44,7 +46,7 @@ struct vmap_area {
unsigned long flags;
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
- struct list_head purge_list; /* "lazy purge" list */
+ struct llist_node purge_list; /* "lazy purge" list */
struct vm_struct *vm;
struct rcu_head rcu_head;
};
@@ -187,4 +189,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#define VMALLOC_TOTAL 0UL
#endif
+int register_vmap_purge_notifier(struct notifier_block *nb);
+int unregister_vmap_purge_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_VMALLOC_H */
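A sketch of a subscriber to the new lazy-purge notification (all names illustrative; what the callback does with the event is up to the driver):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>

static int example_purge_notify(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        /* e.g. release cached vmalloc-backed buffers to free up vmap space */
        return NOTIFY_OK;
}

static struct notifier_block example_purge_nb = {
        .notifier_call = example_purge_notify,
};

static int __init example_init(void)
{
        return register_vmap_purge_notifier(&example_purge_nb);
}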
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 73fae8c4a5fb..d2da8e053210 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
#ifdef CONFIG_NUMA
extern unsigned long node_page_state(int node, enum zone_stat_item item);
-extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#else
#define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl, _z, gfp) do { } while (0)
#endif /* CONFIG_NUMA */
@@ -193,6 +191,10 @@ void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
+struct ctl_table;
+int vmstat_refresh(struct ctl_table *, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
int calculate_pressure_threshold(struct zone *zone);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 4457541de3c9..94079bab9243 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,10 +30,11 @@ struct xattr_handler {
int flags; /* fs private flags */
bool (*list)(struct dentry *dentry);
int (*get)(const struct xattr_handler *, struct dentry *dentry,
- const char *name, void *buffer, size_t size);
+ struct inode *inode, const char *name, void *buffer,
+ size_t size);
int (*set)(const struct xattr_handler *, struct dentry *dentry,
- const char *name, const void *buffer, size_t size,
- int flags);
+ struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags);
};
const char *xattr_full_name(const struct xattr_handler *, const char *);
@@ -51,9 +52,10 @@ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, i
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int vfs_removexattr(struct dentry *, const char *);
-ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
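A stub handler showing the updated ->get prototype (the "example." namespace and the handler are hypothetical): the inode is now passed explicitly instead of being derived from the dentry.

#include <linux/errno.h>
#include <linux/xattr.h>

static int example_xattr_get(const struct xattr_handler *handler,
                             struct dentry *dentry, struct inode *inode,
                             const char *name, void *buffer, size_t size)
{
        /* a real handler would read from inode-private state here */
        return -EOPNOTSUPP;
}

static const struct xattr_handler example_xattr_handler = {
        .prefix = "example.",
        .get    = example_xattr_get,
};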
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 34eb16098a33..57a8e98f2708 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,10 +41,10 @@ struct zs_pool_stats {
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
diff --git a/include/media/media-device.h b/include/media/media-device.h
index df74cfa7da4a..a9b33c47310d 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -25,7 +25,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
@@ -304,8 +303,7 @@ struct media_entity_notify {
* @pads: List of registered pads
* @links: List of registered links
* @entity_notify: List of registered entity_notify callbacks
- * @lock: Entities list lock
- * @graph_mutex: Entities graph operation lock
+ * @graph_mutex: Protects access to struct media_device data
* @pm_count_walk: Graph walk for power state walk. Access serialised using
* graph_mutex.
*
@@ -313,7 +311,8 @@ struct media_entity_notify {
* @enable_source: Enable Source Handler function pointer
* @disable_source: Disable Source Handler function pointer
*
- * @link_notify: Link state change notification callback
+ * @link_notify: Link state change notification callback. This callback is
+ * called with the graph_mutex held.
*
* This structure represents an abstract high-level media device. It allows easy
* access to entities and provides basic media device-level support. The
@@ -357,7 +356,7 @@ struct media_device {
u32 hw_revision;
u32 driver_version;
- u32 topology_version;
+ u64 topology_version;
u32 id;
struct ida entity_internal_idx;
@@ -371,8 +370,6 @@ struct media_device {
/* notify callback list invoked when a new entity is registered */
struct list_head entity_notify;
- /* Protects the graph objects creation/removal */
- spinlock_t lock;
/* Serializes graph operations. */
struct mutex graph_mutex;
struct media_entity_graph pm_count_walk;
@@ -494,7 +491,7 @@ int __must_check __media_device_register(struct media_device *mdev,
#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
/**
- * __media_device_unregister() - Unegisters a media device element
+ * media_device_unregister() - Unregisters a media device element
*
* @mdev: pointer to struct &media_device
*
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 6dc9e4e8cbd4..cbb266f7f2b5 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -179,6 +179,9 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_entity_pipeline_start() function
* validates all links by calling this operation. Optional.
+ *
+ * Note: These callbacks are called with the struct media_device.@graph_mutex
+ * mutex held.
*/
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
@@ -188,10 +191,38 @@ struct media_entity_operations {
};
/**
+ * enum media_entity_type - Media entity type
+ *
+ * @MEDIA_ENTITY_TYPE_BASE:
+ * The entity isn't embedded in another subsystem structure.
+ * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE:
+ * The entity is embedded in a struct video_device instance.
+ * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV:
+ * The entity is embedded in a struct v4l2_subdev instance.
+ *
+ * Media entity objects are often not instantiated directly, but the media
+ * entity structure is inherited by (through embedding) other subsystem-specific
+ * structures. The media entity type identifies the type of the subclass
+ * structure that implements a media entity instance.
+ *
+ * This allows runtime type identification of media entities and safe casting to
+ * the correct object type. For instance, a media entity structure instance
+ * embedded in a v4l2_subdev structure instance will have the type
+ * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * structure using the container_of() macro.
+ */
+enum media_entity_type {
+ MEDIA_ENTITY_TYPE_BASE,
+ MEDIA_ENTITY_TYPE_VIDEO_DEVICE,
+ MEDIA_ENTITY_TYPE_V4L2_SUBDEV,
+};
+
+/**
* struct media_entity - A media entity graph object.
*
* @graph_obj: Embedded structure containing the media object common data.
* @name: Entity name.
+ * @obj_type: Type of the object that implements the media_entity.
* @function: Entity main function, as defined in uapi/media.h
* (MEDIA_ENT_F_*)
* @flags: Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*)
@@ -220,6 +251,7 @@ struct media_entity_operations {
struct media_entity {
struct media_gobj graph_obj; /* must be first field in struct */
const char *name;
+ enum media_entity_type obj_type;
u32 function;
unsigned long flags;
@@ -329,56 +361,29 @@ static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id)
}
/**
- * is_media_entity_v4l2_io() - identify if the entity main function
- * is a V4L2 I/O
- *
+ * is_media_entity_v4l2_video_device() - Check if the entity is a video_device
* @entity: pointer to entity
*
- * Return: true if the entity main function is one of the V4L2 I/O types
- * (video, VBI or SDR radio); false otherwise.
+ * Return: true if the entity is an instance of a video_device object and can
+ * safely be cast to a struct video_device using the container_of() macro, or
+ * false otherwise.
*/
-static inline bool is_media_entity_v4l2_io(struct media_entity *entity)
+static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity)
{
- if (!entity)
- return false;
-
- switch (entity->function) {
- case MEDIA_ENT_F_IO_V4L:
- case MEDIA_ENT_F_IO_VBI:
- case MEDIA_ENT_F_IO_SWRADIO:
- return true;
- default:
- return false;
- }
+ return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
}
/**
- * is_media_entity_v4l2_subdev - return true if the entity main function is
- * associated with the V4L2 API subdev usage
- *
+ * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev
* @entity: pointer to entity
*
- * This is an ancillary function used by subdev-based V4L2 drivers.
- * It checks if the entity function is one of functions used by a V4L2 subdev,
- * e. g. camera-relatef functions, analog TV decoder, TV tuner, V4L2 DSPs.
+ * Return: true if the entity is an instance of a v4l2_subdev object and can
+ * safely be cast to a struct v4l2_subdev using the container_of() macro, or
+ * false otherwise.
*/
static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
{
- if (!entity)
- return false;
-
- switch (entity->function) {
- case MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
- case MEDIA_ENT_F_CAM_SENSOR:
- case MEDIA_ENT_F_FLASH:
- case MEDIA_ENT_F_LENS:
- case MEDIA_ENT_F_ATV_DECODER:
- case MEDIA_ENT_F_TUNER:
- return true;
-
- default:
- return false;
- }
+ return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
}
/**
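A sketch of the intended pattern (helper name illustrative): check the object type at runtime before downcasting with container_of().

#include <linux/kernel.h>
#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

static struct v4l2_subdev *example_entity_to_subdev(struct media_entity *entity)
{
        if (!is_media_entity_v4l2_subdev(entity))
                return NULL;    /* not backed by a v4l2_subdev */

        return container_of(entity, struct v4l2_subdev, entity);
}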
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 0f77b3dffb37..b6586a91129c 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -215,12 +215,9 @@ enum raw_event_type {
struct ir_raw_event {
union {
u32 duration;
-
- struct {
- u32 carrier;
- u8 duty_cycle;
- };
+ u32 carrier;
};
+ u8 duty_cycle;
unsigned pulse:1;
unsigned reset:1;
@@ -228,13 +225,7 @@ struct ir_raw_event {
unsigned carrier_report:1;
};
-#define DEFINE_IR_RAW_EVENT(event) \
- struct ir_raw_event event = { \
- { .duration = 0 } , \
- .pulse = 0, \
- .reset = 0, \
- .timeout = 0, \
- .carrier_report = 0 }
+#define DEFINE_IR_RAW_EVENT(event) struct ir_raw_event event = {}
static inline void init_ir_raw_event(struct ir_raw_event *ev)
{
@@ -256,8 +247,7 @@ void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
- DEFINE_IR_RAW_EVENT(ev);
- ev.reset = true;
+ struct ir_raw_event ev = { .reset = true };
ir_raw_event_store(dev, &ev);
ir_raw_event_handle(dev);
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 76056ab5c5bd..25a3190308fb 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -92,6 +92,9 @@ struct video_device
/* device ops */
const struct v4l2_file_operations *fops;
+ /* device capabilities as used in v4l2_capabilities */
+ u32 device_caps;
+
/* sysfs */
struct device dev; /* v4l device */
struct cdev *cdev; /* character device */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 9c581578783f..d5d45a8d3998 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -196,11 +196,64 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
})
-#define v4l2_device_has_op(v4l2_dev, o, f) \
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == `0, then match them all). Ignore any errors. Note that you
+ * cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
+ \
+ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
+ !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
+ ##args); \
+ } while (0)
+
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == 0, then match them all). If the callback returns an error
+ * other than 0 or -ENOIOCTLCMD, then return with that error code. Note that
+ * you cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
+ !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
+ ##args); \
+})
+
+/*
+ * Does any subdev with matching grpid (or all if grpid == 0) have the given
+ * op?
+ */
+#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \
+({ \
+ struct v4l2_subdev *__sd; \
+ bool __result = false; \
+ list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \
+ if ((grpid) && __sd->grp_id != (grpid)) \
+ continue; \
+ if (v4l2_subdev_has_op(__sd, o, f)) { \
+ __result = true; \
+ break; \
+ } \
+ } \
+ __result; \
+})
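
Taken together, the mask-based call macros and the extended has_op check let a bridge driver address a subset of its subdevs by group id without keeping explicit pointers. A sketch, assuming a driver-defined group mask and the standard video/core subdev ops (the hyp_ names are hypothetical):

        #include <media/v4l2-device.h>

        #define HYP_GRP_SENSOR_MASK BIT(0)      /* hypothetical driver-defined mask */

        static int hyp_start_sensors(struct v4l2_device *v4l2_dev)
        {
                int err;

                /* Start streaming only on subdevs whose grp_id matches the mask. */
                err = v4l2_device_mask_call_until_err(v4l2_dev, HYP_GRP_SENSOR_MASK,
                                                      video, s_stream, 1);
                if (err)
                        return err;

                /* Log status on every subdev that implements the op. */
                if (v4l2_device_has_op(v4l2_dev, 0, core, log_status))
                        v4l2_device_mask_call_all(v4l2_dev, 0, core, log_status);

                return 0;
        }
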
+
+/*
+ * Does any subdev with matching grpmsk (or all if grpmsk == 0) have the given
+ * op?
+ */
+#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \
({ \
struct v4l2_subdev *__sd; \
bool __result = false; \
list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \
+ if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \
+ continue; \
if (v4l2_subdev_has_op(__sd, o, f)) { \
__result = true; \
break; \
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
new file mode 100644
index 000000000000..d2125f0cc7cd
--- /dev/null
+++ b/include/media/v4l2-rect.h
@@ -0,0 +1,173 @@
+/*
+ * v4l2-rect.h - v4l2_rect helper functions
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_RECT_H_
+#define _V4L2_RECT_H_
+
+#include <linux/videodev2.h>
+
+/**
+ * v4l2_rect_set_size_to() - copy the width/height values.
+ * @r: rect whose width and height fields will be set
+ * @size: rect containing the width and height fields you need.
+ */
+static inline void v4l2_rect_set_size_to(struct v4l2_rect *r,
+ const struct v4l2_rect *size)
+{
+ r->width = size->width;
+ r->height = size->height;
+}
+
+/**
+ * v4l2_rect_set_min_size() - width and height of r should be >= min_size.
+ * @r: rect whose width and height will be modified
+ * @min_size: rect containing the minimal width and height
+ */
+static inline void v4l2_rect_set_min_size(struct v4l2_rect *r,
+ const struct v4l2_rect *min_size)
+{
+ if (r->width < min_size->width)
+ r->width = min_size->width;
+ if (r->height < min_size->height)
+ r->height = min_size->height;
+}
+
+/**
+ * v4l2_rect_set_max_size() - width and height of r should be <= max_size
+ * @r: rect whose width and height will be modified
+ * @max_size: rect containing the maximum width and height
+ */
+static inline void v4l2_rect_set_max_size(struct v4l2_rect *r,
+ const struct v4l2_rect *max_size)
+{
+ if (r->width > max_size->width)
+ r->width = max_size->width;
+ if (r->height > max_size->height)
+ r->height = max_size->height;
+}
+
+/**
+ * v4l2_rect_map_inside() - r should be inside boundary.
+ * @r: rect that will be modified
+ * @boundary: rect containing the boundary for @r
+ */
+static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
+ const struct v4l2_rect *boundary)
+{
+ v4l2_rect_set_max_size(r, boundary);
+ if (r->left < boundary->left)
+ r->left = boundary->left;
+ if (r->top < boundary->top)
+ r->top = boundary->top;
+ if (r->left + r->width > boundary->width)
+ r->left = boundary->width - r->width;
+ if (r->top + r->height > boundary->height)
+ r->top = boundary->height - r->height;
+}
+
+/**
+ * v4l2_rect_same_size() - return true if r1 has the same size as r2
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Return true if both rectangles have the same size.
+ */
+static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return r1->width == r2->width && r1->height == r2->height;
+}
+
+/**
+ * v4l2_rect_intersect() - calculate the intersection of two rects.
+ * @r: intersection of @r1 and @r2.
+ * @r1: rectangle.
+ * @r2: rectangle.
+ */
+static inline void v4l2_rect_intersect(struct v4l2_rect *r,
+ const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ int right, bottom;
+
+ r->top = max(r1->top, r2->top);
+ r->left = max(r1->left, r2->left);
+ bottom = min(r1->top + r1->height, r2->top + r2->height);
+ right = min(r1->left + r1->width, r2->left + r2->width);
+ r->height = max(0, bottom - r->top);
+ r->width = max(0, right - r->left);
+}
+
+/**
+ * v4l2_rect_scale() - scale rect r by to/from
+ * @r: rect to be scaled.
+ * @from: from rectangle.
+ * @to: to rectangle.
+ *
+ * This scales rectangle @r horizontally by @to->width / @from->width and
+ * vertically by @to->height / @from->height.
+ *
+ * Typically @r is a rectangle inside @from and you want the rectangle as
+ * it would appear after scaling @from to @to. So the resulting @r will
+ * be the scaled rectangle inside @to.
+ */
+static inline void v4l2_rect_scale(struct v4l2_rect *r,
+ const struct v4l2_rect *from,
+ const struct v4l2_rect *to)
+{
+ if (from->width == 0 || from->height == 0) {
+ r->left = r->top = r->width = r->height = 0;
+ return;
+ }
+ r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
+ r->width = ((r->width * to->width) / from->width) & ~1;
+ r->top = ((r->top - from->top) * to->height) / from->height;
+ r->height = (r->height * to->height) / from->height;
+}
+
+/**
+ * v4l2_rect_overlap() - do r1 and r2 overlap?
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Returns true if @r1 and @r2 overlap.
+ */
+static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ /*
+ * IF the left side of r1 is to the right of the right side of r2 OR
+ * the left side of r2 is to the right of the right side of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->left >= r2->left + r2->width ||
+ r2->left >= r1->left + r1->width)
+ return false;
+ /*
+ * IF the top side of r1 is below the bottom of r2 OR
+ * the top side of r2 is below the bottom of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->top >= r2->top + r2->height ||
+ r2->top >= r1->top + r1->height)
+ return false;
+ return true;
+}
+
+#endif
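
The helpers above compose naturally for the usual crop/compose bookkeeping. Note that v4l2_rect_scale() forces left and width to even values via the & ~1 masking; for example, scaling r = {100, 50, 320, 240} from a 640x480 frame to a 1280x960 frame yields {200, 100, 640, 480}. A sketch of typical use, with hyp_ names being hypothetical and the bounds assumed to start at the origin:

        #include <media/v4l2-rect.h>

        /* Hypothetical: sanitize a requested crop against the active area. */
        static void hyp_adjust_crop(struct v4l2_rect *crop,
                                    const struct v4l2_rect *active)
        {
                static const struct v4l2_rect min_rect = { .width = 2, .height = 2 };

                v4l2_rect_set_min_size(crop, &min_rect);
                v4l2_rect_map_inside(crop, active);     /* also caps width/height */
        }

        /* Hypothetical: did the new crop change anything besides its position? */
        static bool hyp_crop_only_moved(const struct v4l2_rect *old,
                                        const struct v4l2_rect *new)
        {
                return v4l2_rect_same_size(old, new);
        }
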
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 11e2dfec0198..32fc7a4beb5e 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -572,6 +572,7 @@ struct v4l2_subdev_pad_config {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
+ * @init_cfg: initialize the pad config to default values
* @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
* code.
* @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
@@ -607,6 +608,8 @@ struct v4l2_subdev_pad_config {
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
+ int (*init_cfg)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
@@ -801,7 +804,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt);
int v4l2_subdev_link_validate(struct media_link *link);
+
+struct v4l2_subdev_pad_config *
+v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
#endif /* CONFIG_MEDIA_CONTROLLER */
+
void v4l2_subdev_init(struct v4l2_subdev *sd,
const struct v4l2_subdev_ops *ops);
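
The new init_cfg pad operation gives subdev drivers a single place to seed TRY formats, and the alloc/free helpers let bridge drivers manage a scratch pad configuration; v4l2_subdev_alloc_pad_config() is expected to run init_cfg when the subdev provides it. A sketch of a subdev wiring this up, assuming CONFIG_MEDIA_CONTROLLER and the subdev TRY-format helpers are available (the hyp_ names and the 640x480 UYVY defaults are hypothetical):

        #include <media/v4l2-subdev.h>

        static int hyp_init_cfg(struct v4l2_subdev *sd,
                                struct v4l2_subdev_pad_config *cfg)
        {
                unsigned int pad;

                for (pad = 0; pad < sd->entity.num_pads; pad++) {
                        struct v4l2_mbus_framefmt *fmt =
                                v4l2_subdev_get_try_format(sd, cfg, pad);

                        fmt->width = 640;
                        fmt->height = 480;
                        fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
                        fmt->field = V4L2_FIELD_NONE;
                        fmt->colorspace = V4L2_COLORSPACE_SRGB;
                }

                return 0;
        }

        static const struct v4l2_subdev_pad_ops hyp_pad_ops = {
                .init_cfg = hyp_init_cfg,
                /* .enum_mbus_code, .get_fmt, .set_fmt, ... */
        };
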
diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h
new file mode 100644
index 000000000000..2a88d1fae0cd
--- /dev/null
+++ b/include/media/v4l2-tpg-colors.h
@@ -0,0 +1,68 @@
+/*
+ * v4l2-tpg-colors.h - Color definitions for the test pattern generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_COLORS_H_
+#define _V4L2_TPG_COLORS_H_
+
+struct color {
+ unsigned char r, g, b;
+};
+
+struct color16 {
+ int r, g, b;
+};
+
+enum tpg_color {
+ TPG_COLOR_CSC_WHITE,
+ TPG_COLOR_CSC_YELLOW,
+ TPG_COLOR_CSC_CYAN,
+ TPG_COLOR_CSC_GREEN,
+ TPG_COLOR_CSC_MAGENTA,
+ TPG_COLOR_CSC_RED,
+ TPG_COLOR_CSC_BLUE,
+ TPG_COLOR_CSC_BLACK,
+ TPG_COLOR_75_YELLOW,
+ TPG_COLOR_75_CYAN,
+ TPG_COLOR_75_GREEN,
+ TPG_COLOR_75_MAGENTA,
+ TPG_COLOR_75_RED,
+ TPG_COLOR_75_BLUE,
+ TPG_COLOR_100_WHITE,
+ TPG_COLOR_100_YELLOW,
+ TPG_COLOR_100_CYAN,
+ TPG_COLOR_100_GREEN,
+ TPG_COLOR_100_MAGENTA,
+ TPG_COLOR_100_RED,
+ TPG_COLOR_100_BLUE,
+ TPG_COLOR_100_BLACK,
+ TPG_COLOR_TEXTFG,
+ TPG_COLOR_TEXTBG,
+ TPG_COLOR_RANDOM,
+ TPG_COLOR_RAMP,
+ TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
+};
+
+extern const struct color tpg_colors[TPG_COLOR_MAX];
+extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
+extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
+ [V4L2_XFER_FUNC_SMPTE2084 + 1]
+ [TPG_COLOR_CSC_BLACK + 1];
+
+#endif
diff --git a/include/media/v4l2-tpg.h b/include/media/v4l2-tpg.h
new file mode 100644
index 000000000000..329bebfa930c
--- /dev/null
+++ b/include/media/v4l2-tpg.h
@@ -0,0 +1,597 @@
+/*
+ * v4l2-tpg.h - Test Pattern Generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_H_
+#define _V4L2_TPG_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-tpg-colors.h>
+
+enum tpg_pattern {
+ TPG_PAT_75_COLORBAR,
+ TPG_PAT_100_COLORBAR,
+ TPG_PAT_CSC_COLORBAR,
+ TPG_PAT_100_HCOLORBAR,
+ TPG_PAT_100_COLORSQUARES,
+ TPG_PAT_BLACK,
+ TPG_PAT_WHITE,
+ TPG_PAT_RED,
+ TPG_PAT_GREEN,
+ TPG_PAT_BLUE,
+ TPG_PAT_CHECKERS_16X16,
+ TPG_PAT_CHECKERS_2X2,
+ TPG_PAT_CHECKERS_1X1,
+ TPG_PAT_COLOR_CHECKERS_2X2,
+ TPG_PAT_COLOR_CHECKERS_1X1,
+ TPG_PAT_ALTERNATING_HLINES,
+ TPG_PAT_ALTERNATING_VLINES,
+ TPG_PAT_CROSS_1_PIXEL,
+ TPG_PAT_CROSS_2_PIXELS,
+ TPG_PAT_CROSS_10_PIXELS,
+ TPG_PAT_GRAY_RAMP,
+
+ /* Must be the last pattern */
+ TPG_PAT_NOISE,
+};
+
+extern const char * const tpg_pattern_strings[];
+
+enum tpg_quality {
+ TPG_QUAL_COLOR,
+ TPG_QUAL_GRAY,
+ TPG_QUAL_NOISE
+};
+
+enum tpg_video_aspect {
+ TPG_VIDEO_ASPECT_IMAGE,
+ TPG_VIDEO_ASPECT_4X3,
+ TPG_VIDEO_ASPECT_14X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
+};
+
+enum tpg_pixel_aspect {
+ TPG_PIXEL_ASPECT_SQUARE,
+ TPG_PIXEL_ASPECT_NTSC,
+ TPG_PIXEL_ASPECT_PAL,
+};
+
+enum tpg_move_mode {
+ TPG_MOVE_NEG_FAST,
+ TPG_MOVE_NEG,
+ TPG_MOVE_NEG_SLOW,
+ TPG_MOVE_NONE,
+ TPG_MOVE_POS_SLOW,
+ TPG_MOVE_POS,
+ TPG_MOVE_POS_FAST,
+};
+
+extern const char * const tpg_aspect_strings[];
+
+#define TPG_MAX_PLANES 3
+#define TPG_MAX_PAT_LINES 8
+
+struct tpg_data {
+ /* Source frame size */
+ unsigned src_width, src_height;
+ /* Buffer height */
+ unsigned buf_height;
+ /* Scaled output frame size */
+ unsigned scaled_width;
+ u32 field;
+ bool field_alternate;
+ /* crop coordinates are frame-based */
+ struct v4l2_rect crop;
+ /* compose coordinates are format-based */
+ struct v4l2_rect compose;
+ /* border and square coordinates are frame-based */
+ struct v4l2_rect border;
+ struct v4l2_rect square;
+
+ /* Color-related fields */
+ enum tpg_quality qual;
+ unsigned qual_offset;
+ u8 alpha_component;
+ bool alpha_red_only;
+ u8 brightness;
+ u8 contrast;
+ u8 saturation;
+ s16 hue;
+ u32 fourcc;
+ bool is_yuv;
+ u32 colorspace;
+ u32 xfer_func;
+ u32 ycbcr_enc;
+ /*
+ * Stores the actual transfer function, i.e. will never be
+ * V4L2_XFER_FUNC_DEFAULT.
+ */
+ u32 real_xfer_func;
+ /*
+ * Stores the actual Y'CbCr encoding, i.e. will never be
+ * V4L2_YCBCR_ENC_DEFAULT.
+ */
+ u32 real_ycbcr_enc;
+ u32 quantization;
+ /*
+ * Stores the actual quantization, i.e. will never be
+ * V4L2_QUANTIZATION_DEFAULT.
+ */
+ u32 real_quantization;
+ enum tpg_video_aspect vid_aspect;
+ enum tpg_pixel_aspect pix_aspect;
+ unsigned rgb_range;
+ unsigned real_rgb_range;
+ unsigned buffers;
+ unsigned planes;
+ bool interleaved;
+ u8 vdownsampling[TPG_MAX_PLANES];
+ u8 hdownsampling[TPG_MAX_PLANES];
+ /*
+ * horizontal positions must be ANDed with this value to enforce
+ * correct boundaries for packed YUYV values.
+ */
+ unsigned hmask[TPG_MAX_PLANES];
+ /* Used to store the colors in native format, either RGB or YUV */
+ u8 colors[TPG_COLOR_MAX][3];
+ u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
+ /* size in bytes for two pixels in each plane */
+ unsigned twopixelsize[TPG_MAX_PLANES];
+ unsigned bytesperline[TPG_MAX_PLANES];
+
+ /* Configuration */
+ enum tpg_pattern pattern;
+ bool hflip;
+ bool vflip;
+ unsigned perc_fill;
+ bool perc_fill_blank;
+ bool show_border;
+ bool show_square;
+ bool insert_sav;
+ bool insert_eav;
+
+ /* Test pattern movement */
+ enum tpg_move_mode mv_hor_mode;
+ int mv_hor_count;
+ int mv_hor_step;
+ enum tpg_move_mode mv_vert_mode;
+ int mv_vert_count;
+ int mv_vert_step;
+
+ bool recalc_colors;
+ bool recalc_lines;
+ bool recalc_square_border;
+
+ /* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
+ unsigned max_line_width;
+ u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *random_line[TPG_MAX_PLANES];
+ u8 *contrast_line[TPG_MAX_PLANES];
+ u8 *black_line[TPG_MAX_PLANES];
+};
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
+void tpg_free(struct tpg_data *tpg);
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+ u32 field);
+void tpg_log_status(struct tpg_data *tpg);
+
+void tpg_set_font(const u8 *f);
+void tpg_gen_text(const struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+void tpg_calc_text_basep(struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose);
+
+static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
+{
+ if (tpg->pattern == pattern)
+ return;
+ tpg->pattern = pattern;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_quality(struct tpg_data *tpg,
+ enum tpg_quality qual, unsigned qual_offset)
+{
+ if (tpg->qual == qual && tpg->qual_offset == qual_offset)
+ return;
+ tpg->qual = qual;
+ tpg->qual_offset = qual_offset;
+ tpg->recalc_colors = true;
+}
+
+static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
+{
+ return tpg->qual;
+}
+
+static inline void tpg_s_alpha_component(struct tpg_data *tpg,
+ u8 alpha_component)
+{
+ if (tpg->alpha_component == alpha_component)
+ return;
+ tpg->alpha_component = alpha_component;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
+ bool red_only)
+{
+ if (tpg->alpha_red_only == red_only)
+ return;
+ tpg->alpha_red_only = red_only;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_brightness(struct tpg_data *tpg,
+ u8 brightness)
+{
+ if (tpg->brightness == brightness)
+ return;
+ tpg->brightness = brightness;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_contrast(struct tpg_data *tpg,
+ u8 contrast)
+{
+ if (tpg->contrast == contrast)
+ return;
+ tpg->contrast = contrast;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_saturation(struct tpg_data *tpg,
+ u8 saturation)
+{
+ if (tpg->saturation == saturation)
+ return;
+ tpg->saturation = saturation;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_hue(struct tpg_data *tpg,
+ s16 hue)
+{
+ if (tpg->hue == hue)
+ return;
+ tpg->hue = hue;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->rgb_range == rgb_range)
+ return;
+ tpg->rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->real_rgb_range == rgb_range)
+ return;
+ tpg->real_rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
+{
+ if (tpg->colorspace == colorspace)
+ return;
+ tpg->colorspace = colorspace;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
+{
+ return tpg->colorspace;
+}
+
+static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
+{
+ if (tpg->ycbcr_enc == ycbcr_enc)
+ return;
+ tpg->ycbcr_enc = ycbcr_enc;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
+{
+ return tpg->ycbcr_enc;
+}
+
+static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
+{
+ if (tpg->xfer_func == xfer_func)
+ return;
+ tpg->xfer_func = xfer_func;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
+{
+ return tpg->xfer_func;
+}
+
+static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
+{
+ if (tpg->quantization == quantization)
+ return;
+ tpg->quantization = quantization;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
+{
+ return tpg->quantization;
+}
+
+static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
+{
+ return tpg->buffers;
+}
+
+static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
+{
+ return tpg->interleaved ? 1 : tpg->planes;
+}
+
+static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
+{
+ return tpg->interleaved;
+}
+
+static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->twopixelsize[plane];
+}
+
+static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
+ tpg->twopixelsize[plane] / 2;
+}
+
+static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
+{
+ return (x * tpg->scaled_width) / tpg->src_width;
+}
+
+static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
+}
+
+static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->bytesperline[plane];
+}
+
+static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
+{
+ unsigned p;
+
+ if (tpg->buffers > 1) {
+ tpg->bytesperline[plane] = bpl;
+ return;
+ }
+
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
+ }
+ if (tpg_g_interleaved(tpg))
+ tpg->bytesperline[1] = tpg->bytesperline[0];
+}
+
+
+static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return tpg_g_bytesperline(tpg, plane);
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = tpg_g_bytesperline(tpg, p);
+
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
+ unsigned plane, unsigned bpl)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return bpl;
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ plane_w /= tpg->hdownsampling[p];
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
+{
+ if (plane >= tpg_g_planes(tpg))
+ return 0;
+
+ return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
+ tpg->vdownsampling[plane];
+}
+
+static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
+{
+ tpg->buf_height = h;
+}
+
+static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
+{
+ tpg->field = field;
+ tpg->field_alternate = alternate;
+}
+
+static inline void tpg_s_perc_fill(struct tpg_data *tpg,
+ unsigned perc_fill)
+{
+ tpg->perc_fill = perc_fill;
+}
+
+static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
+{
+ return tpg->perc_fill;
+}
+
+static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
+ bool perc_fill_blank)
+{
+ tpg->perc_fill_blank = perc_fill_blank;
+}
+
+static inline void tpg_s_video_aspect(struct tpg_data *tpg,
+ enum tpg_video_aspect vid_aspect)
+{
+ if (tpg->vid_aspect == vid_aspect)
+ return;
+ tpg->vid_aspect = vid_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
+{
+ return tpg->vid_aspect;
+}
+
+static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
+ enum tpg_pixel_aspect pix_aspect)
+{
+ if (tpg->pix_aspect == pix_aspect)
+ return;
+ tpg->pix_aspect = pix_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline void tpg_s_show_border(struct tpg_data *tpg,
+ bool show_border)
+{
+ tpg->show_border = show_border;
+}
+
+static inline void tpg_s_show_square(struct tpg_data *tpg,
+ bool show_square)
+{
+ tpg->show_square = show_square;
+}
+
+static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
+{
+ tpg->insert_sav = insert_sav;
+}
+
+static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
+{
+ tpg->insert_eav = insert_eav;
+}
+
+void tpg_update_mv_step(struct tpg_data *tpg);
+
+static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_hor_mode)
+{
+ tpg->mv_hor_mode = mv_hor_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_vert_mode)
+{
+ tpg->mv_vert_mode = mv_vert_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_init_mv_count(struct tpg_data *tpg)
+{
+ tpg->mv_hor_count = tpg->mv_vert_count = 0;
+}
+
+static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
+{
+ tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
+ tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
+}
+
+static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
+{
+ if (tpg->hflip == hflip)
+ return;
+ tpg->hflip = hflip;
+ tpg_update_mv_step(tpg);
+ tpg->recalc_lines = true;
+}
+
+static inline bool tpg_g_hflip(const struct tpg_data *tpg)
+{
+ return tpg->hflip;
+}
+
+static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
+{
+ tpg->vflip = vflip;
+}
+
+static inline bool tpg_g_vflip(const struct tpg_data *tpg)
+{
+ return tpg->vflip;
+}
+
+static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
+{
+ return tpg->pattern != TPG_PAT_NOISE &&
+ tpg->mv_hor_mode == TPG_MOVE_NONE &&
+ tpg->mv_vert_mode == TPG_MOVE_NONE;
+}
+
+#endif
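
The accessors above all follow one pattern: compare, store, and mark the relevant recalc flag so the expensive work happens lazily on the next fill. End to end, a driver drives the generator roughly as in the sketch below (the sizes, format and pattern are arbitrary and hyp_fill_frame is hypothetical):

        #include <media/v4l2-tpg.h>

        /* Hypothetical: render one 1280x720 YUYV frame into vbuf. */
        static int hyp_fill_frame(struct tpg_data *tpg, u8 *vbuf)
        {
                int ret;

                tpg_init(tpg, 1280, 720);
                ret = tpg_alloc(tpg, 1920);             /* widest line we will ever draw */
                if (ret)
                        return ret;

                if (!tpg_s_fourcc(tpg, V4L2_PIX_FMT_YUYV)) {
                        tpg_free(tpg);
                        return -EINVAL;
                }

                tpg_reset_source(tpg, 1280, 720, V4L2_FIELD_NONE);
                tpg_s_buf_height(tpg, 720);
                tpg_s_bytesperline(tpg, 0, 1280 * 2);   /* YUYV: two bytes per pixel */
                tpg_s_pattern(tpg, TPG_PAT_75_COLORBAR);

                tpg_fill_plane_buffer(tpg, 0, 0, vbuf); /* std 0: not an SDTV standard */
                tpg_free(tpg);

                return 0;
        }
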
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
/**
* struct vb2_ops - driver-specific callbacks
*
+ * @verify_planes_array: Verify that a given user space structure contains
+ * enough planes for the buffer. This is called
+ * for each dequeued buffer.
* @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
* For V4L2 this is a struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
* the vb2_buffer struct.
*/
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ * has not been called. This is a vb1 idiom that has also been
+ * adopted by vb2.
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
+ unsigned quirk_poll_must_check_waiting_for_buffers:1;
struct mutex *lock;
void *owner;
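
The new quirk bit is typically set by the driver before the queue is initialized. A minimal sketch (hyp_init_queue is hypothetical, everything else in it is standard vb2):

        #include <media/videobuf2-core.h>

        /* Hypothetical: opt a queue into the vb1-style poll() behaviour. */
        static int hyp_init_queue(struct vb2_queue *q)
        {
                q->quirk_poll_must_check_waiting_for_buffers = 1;

                return vb2_queue_init(q);
        }
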
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc541753896f..3e654a0455bd 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -23,11 +23,22 @@ int vsp1_du_init(struct device *dev);
int vsp1_du_setup_lif(struct device *dev, unsigned int width,
unsigned int height);
-int vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, u32 pixelformat,
- unsigned int pitch, dma_addr_t mem[2],
- const struct v4l2_rect *src,
- const struct v4l2_rect *dst);
-int vsp1_du_atomic_flush(struct device *dev);
+void vsp1_du_atomic_begin(struct device *dev);
+int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf,
+ u32 pixelformat, unsigned int pitch,
+ dma_addr_t mem[2], const struct v4l2_rect *src,
+ const struct v4l2_rect *dst, unsigned int alpha,
+ unsigned int zpos);
+void vsp1_du_atomic_flush(struct device *dev);
+
+static inline int vsp1_du_atomic_update(struct device *dev,
+ unsigned int rpf_index, u32 pixelformat,
+ unsigned int pitch, dma_addr_t mem[2],
+ const struct v4l2_rect *src,
+ const struct v4l2_rect *dst)
+{
+ return vsp1_du_atomic_update_ext(dev, rpf_index, pixelformat, pitch,
+ mem, src, dst, 255, 0);
+}
#endif /* __MEDIA_VSP1_H__ */
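
The begin/update/flush triplet maps directly onto a DRM atomic commit, and the new inline wrapper keeps old call sites working at full opacity (alpha 255) and the lowest Z position. A sketch of one plane update on RPF 0, assuming a packed 32-bit format so the pitch is simply width * 4 (hyp_commit_plane is hypothetical):

        #include <linux/videodev2.h>
        #include <media/vsp1.h>

        static void hyp_commit_plane(struct device *vsp, dma_addr_t mem[2],
                                     const struct v4l2_rect *src,
                                     const struct v4l2_rect *dst)
        {
                vsp1_du_atomic_begin(vsp);
                vsp1_du_atomic_update(vsp, 0, V4L2_PIX_FMT_ARGB32,
                                      src->width * 4, mem, src, dst);
                vsp1_du_atomic_flush(vsp);
        }
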
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7d5e2613c7b8..56560c5781b4 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -127,6 +127,14 @@ int cxl_afu_reset(struct cxl_context *ctx);
void cxl_set_master(struct cxl_context *ctx);
/*
+ * Sets the context to use real mode memory accesses to operate with
+ * translation disabled. Note that this only makes sense for kernel contexts
+ * under bare metal, and will not work with virtualisation. May only be
+ * performed on stopped contexts.
+ */
+int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode);
+
+/*
* Map and unmap the AFU Problem Space area. The amount and location mapped
* depends on if this context is a master or slave.
*/
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index da3a77d25fcb..da84cf920b78 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -58,6 +58,9 @@
#include <net/ipv6.h>
#include <net/net_namespace.h>
+/* special link-layer handling */
+#include <net/mac802154.h>
+
#define EUI64_ADDR_LEN 8
#define LOWPAN_NHC_MAX_ID_LEN 1
@@ -93,7 +96,7 @@ static inline bool lowpan_is_iphc(u8 dispatch)
}
#define LOWPAN_PRIV_SIZE(llpriv_size) \
- (sizeof(struct lowpan_priv) + llpriv_size)
+ (sizeof(struct lowpan_dev) + llpriv_size)
enum lowpan_lltypes {
LOWPAN_LLTYPE_BTLE,
@@ -129,7 +132,7 @@ lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
}
-struct lowpan_priv {
+struct lowpan_dev {
enum lowpan_lltypes lltype;
struct dentry *iface_debugfs;
struct lowpan_iphc_ctx_table ctx;
@@ -139,11 +142,23 @@ struct lowpan_priv {
};
static inline
-struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
{
return netdev_priv(dev);
}
+/* private device info */
+struct lowpan_802154_dev {
+ struct net_device *wdev; /* wpan device ptr */
+ u16 fragment_tag;
+};
+
+static inline struct
+lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev)
+{
+ return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
+}
+
struct lowpan_802154_cb {
u16 d_tag;
unsigned int d_size;
@@ -157,6 +172,22 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
return (struct lowpan_802154_cb *)skb->cb;
}
+static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+ const void *lladdr)
+{
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+ /* second bit-flip (Universal/Local)
+ * is done according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+}
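
A concrete expansion may make the helper clearer: an EUI-64 of 00:11:22:ff:fe:33:44:55 becomes fe80::211:22ff:fe33:4455, because byte 8 of the address is the first EUI-64 byte with its Universal/Local bit (0x02) flipped. An illustrative sketch (only the helper itself comes from this patch):

        #include <net/6lowpan.h>

        /* Illustrative only: expand a fixed EUI-64 into a link-local address. */
        static void hyp_show_expansion(void)
        {
                static const u8 eui64[EUI64_ADDR_LEN] = {
                        0x00, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55
                };
                struct in6_addr addr;

                memset(&addr, 0, sizeof(addr)); /* helper writes only bytes 0-1 and 8-15 */
                lowpan_iphc_uncompress_eui64_lladdr(&addr, eui64);
                /* addr == fe80::211:22ff:fe33:4455 */
        }
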
+
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 03e322b30218..9a9a8edc138f 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -106,6 +106,7 @@ struct tc_action_ops {
int bind);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int, struct tc_action *);
+ void (*stats_update)(struct tc_action *, u64, u32, u64);
};
struct tc_action_net {
@@ -178,10 +179,21 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#define tc_for_each_action(_a, _exts) \
list_for_each_entry(a, &(_exts)->actions, list)
+
+static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
+ u64 packets, u64 lastuse)
+{
+ if (!a->ops->stats_update)
+ return;
+
+ a->ops->stats_update(a, bytes, packets, lastuse);
+}
+
#else /* CONFIG_NET_CLS_ACT */
#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while (0)
+#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
+#define tcf_action_stats_update(a, bytes, packets, lastuse)
#endif /* CONFIG_NET_CLS_ACT */
#endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index e797d45a5ae6..ac1bc3c49fbd 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,6 +12,7 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
+#include <linux/skbuff.h>
#include <linux/rxrpc.h>
struct rxrpc_call;
@@ -19,11 +20,12 @@ struct rxrpc_call;
/*
* the mark applied to socket buffers that may be intercepted
*/
-enum {
+enum rxrpc_skb_mark {
RXRPC_SKB_MARK_DATA, /* data message */
RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
RXRPC_SKB_MARK_BUSY, /* server busy message */
RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
+ RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
RXRPC_SKB_MARK_NET_ERROR, /* network error message */
RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
RXRPC_SKB_MARK_NEW_CALL, /* local error message */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5d38d980b89d..eefcf3e96421 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -61,6 +61,8 @@
#define HCI_RS232 4
#define HCI_PCI 5
#define HCI_SDIO 6
+#define HCI_SPI 7
+#define HCI_I2C 8
/* HCI controller types */
#define HCI_BREDR 0x00
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9e1b24c29f0c..63921672bed0 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -68,26 +68,6 @@ struct wiphy;
*/
/**
- * enum ieee80211_band - supported frequency bands
- *
- * The bands are assigned this way because the supported
- * bitrates differ in these bands.
- *
- * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
- * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
- * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
- * @IEEE80211_NUM_BANDS: number of defined bands
- */
-enum ieee80211_band {
- IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
- IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
- IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
-
- /* keep last */
- IEEE80211_NUM_BANDS
-};
-
-/**
* enum ieee80211_channel_flags - channel flags
*
* Channel flags set by the regulatory control code.
@@ -167,7 +147,7 @@ enum ieee80211_channel_flags {
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
*/
struct ieee80211_channel {
- enum ieee80211_band band;
+ enum nl80211_band band;
u16 center_freq;
u16 hw_value;
u32 flags;
@@ -324,7 +304,7 @@ struct ieee80211_sta_vht_cap {
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
struct ieee80211_rate *bitrates;
- enum ieee80211_band band;
+ enum nl80211_band band;
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
@@ -816,6 +796,7 @@ enum station_parameters_apply_mask {
* @supported_oper_classes_len: number of supported operating classes
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
+ * @support_p2p_ps: information if station supports P2P PS mechanism
*/
struct station_parameters {
const u8 *supported_rates;
@@ -841,6 +822,7 @@ struct station_parameters {
u8 supported_oper_classes_len;
u8 opmode_notif;
bool opmode_notif_used;
+ int support_p2p_ps;
};
/**
@@ -1063,11 +1045,12 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
+ * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
*/
struct station_info {
- u32 filled;
+ u64 filled;
u32 connected_time;
u32 inactive_time;
u64 rx_bytes;
@@ -1106,6 +1089,7 @@ struct station_info {
u32 expected_throughput;
u64 rx_beacon;
+ u64 rx_duration;
u8 rx_beacon_signal_avg;
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
@@ -1368,7 +1352,7 @@ struct mesh_setup {
bool user_mpm;
u8 dtim_period;
u16 beacon_interval;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
};
@@ -1455,6 +1439,7 @@ struct cfg80211_ssid {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1465,12 +1450,13 @@ struct cfg80211_scan_request {
size_t ie_len;
u32 flags;
- u32 rates[IEEE80211_NUM_BANDS];
+ u32 rates[NUM_NL80211_BANDS];
struct wireless_dev *wdev;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ u8 bssid[ETH_ALEN] __aligned(2);
/* internal */
struct wiphy *wiphy;
@@ -1617,7 +1603,7 @@ struct cfg80211_inform_bss {
};
/**
- * struct cfg80211_bss_ie_data - BSS entry IE data
+ * struct cfg80211_bss_ies - BSS entry IE data
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
* @len: length of the IEs
@@ -1746,7 +1732,12 @@ enum cfg80211_assoc_req_flags {
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
* @crypto: crypto settings
- * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
* @flags: See &enum cfg80211_assoc_req_flags
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
@@ -1851,12 +1842,39 @@ struct cfg80211_ibss_params {
bool privacy;
bool control_port;
bool userspace_handles_dfs;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
};
/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+ enum nl80211_band band;
+ s8 delta;
+};
+
+/**
+ * struct cfg80211_bss_selection - connection parameters for BSS selection.
+ *
+ * @behaviour: requested BSS selection behaviour.
+ * @param: parameters for the requested behaviour.
+ * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ */
+struct cfg80211_bss_selection {
+ enum nl80211_bss_select_attr behaviour;
+ union {
+ enum nl80211_band band_pref;
+ struct cfg80211_bss_select_adjust adjust;
+ } param;
+};
+
+/**
* struct cfg80211_connect_params - Connection parameters
*
* This structure provides information needed to complete IEEE 802.11
@@ -1893,6 +1911,13 @@ struct cfg80211_ibss_params {
* @vht_capa_mask: The bits of vht_capa which are to be used.
* @pbss: if set, connect to a PCP instead of AP. Valid for DMG
* networks.
+ * @bss_select: criteria to be used for BSS selection.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1916,6 +1941,8 @@ struct cfg80211_connect_params {
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
bool pbss;
+ struct cfg80211_bss_selection bss_select;
+ const u8 *prev_bssid;
};
/**
@@ -1945,7 +1972,7 @@ struct cfg80211_bitrate_mask {
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
enum nl80211_txrate_gi gi;
- } control[IEEE80211_NUM_BANDS];
+ } control[NUM_NL80211_BANDS];
};
/**
* struct cfg80211_pmksa - PMK Security Association
@@ -2342,7 +2369,17 @@ struct cfg80211_qos_map {
* @connect: Connect to the ESS with the specified parameters. When connected,
* call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
* If the connection fails for some reason, call cfg80211_connect_result()
- * with the status from the AP.
+ * with the status from the AP. The driver is allowed to roam to other
+ * BSSes within the ESS when the other BSS matches the connect parameters.
+ * When such roaming is initiated by the driver, the driver is expected to
+ * verify that the target matches the configured security parameters and
+ * to use Reassociation Request frame instead of Association Request frame.
+ * The connect function can also be used to request the driver to perform
+ * a specific roam when connected to an ESS. In that case, the prev_bssid
+ * parameter is set to the BSSID of the currently associated BSS as an
+ * indication of requesting reassociation. In both the driver-initiated case
+ * and the roaming requested by a new connect() call, the result of roaming is
+ * indicated with a call to cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
* @disconnect: Disconnect from the BSS/ESS.
* (invoked with the wireless_dev mutex held)
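
For a fullmac driver the practical consequence is a single branch in its .connect() handler: a non-NULL prev_bssid means "reassociate within the current ESS", anything else is a fresh association. A sketch (the hyp_fw_* firmware calls are hypothetical; only the prev_bssid field comes from this patch):

        #include <net/cfg80211.h>

        static int hyp_connect(struct wiphy *wiphy, struct net_device *dev,
                               struct cfg80211_connect_params *sme)
        {
                if (sme->prev_bssid) {
                        /*
                         * Reassociation requested: send a Reassociation Request
                         * with the Current AP address set to sme->prev_bssid.
                         */
                        return hyp_fw_reassociate(dev, sme->prev_bssid, sme);
                }

                return hyp_fw_associate(dev, sme);
        }
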
@@ -2622,7 +2659,7 @@ struct cfg80211_ops {
int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
- int rate[IEEE80211_NUM_BANDS]);
+ int rate[NUM_NL80211_BANDS]);
int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
@@ -3152,6 +3189,9 @@ struct wiphy_vendor_command {
* @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden.
* If null, then none can be over-ridden.
*
+ * @wdev_list: the list of associated (virtual) interfaces; this list must
+ * not be modified by the driver, but can be read with RTNL/RCU protection.
+ *
* @max_acl_mac_addrs: Maximum number of MAC addresses that the device
* supports for ACL.
*
@@ -3184,6 +3224,9 @@ struct wiphy_vendor_command {
* low rssi when a frame is heard on different channel, then it should set
* this variable to the maximal offset for which it can compensate.
* This value should be set in MHz.
+ * @bss_select_support: bitmask indicating the BSS selection criteria supported
+ * by the driver in the .connect() callback. The bit position maps to the
+ * attribute indices defined in &enum nl80211_bss_select_attr.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3265,7 +3308,7 @@ struct wiphy {
* help determine whether you own this wiphy or not. */
const void *privid;
- struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
/* Lets us get back the wiphy on the callback */
void (*reg_notifier)(struct wiphy *wiphy,
@@ -3288,6 +3331,8 @@ struct wiphy {
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
+ struct list_head wdev_list;
+
/* the network namespace this phy lives in currently */
possible_net_t _net;
@@ -3306,6 +3351,8 @@ struct wiphy {
u8 max_num_csa_counters;
u8 max_adj_channel_rssi_comp;
+ u32 bss_select_support;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3598,7 +3645,7 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
@@ -3851,7 +3898,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
- * @oui_type: vendor-specific OUI type
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
* @ies: data consisting of IEs
* @len: length of data
*
@@ -3863,7 +3910,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len);
/**
@@ -4610,6 +4657,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
#endif
/**
+ * cfg80211_connect_bss - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @bss: entry of bss to which the STA got connected, can be obtained
+ * through cfg80211_get_bss (may be %NULL)
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @status: status code, 0 for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures.
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver whenever connect() has
+ * succeeded. This is similar to cfg80211_connect_result(), but with the
+ * option of identifying the exact bss entry for the connection. Only one of
+ * these functions should be called.
+ */
+void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, u16 status, gfp_t gfp);
+
+/**
* cfg80211_connect_result - notify cfg80211 of connection result
*
* @dev: network device
@@ -4626,10 +4699,15 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
* It should be called by the underlying driver whenever connect() has
* succeeded.
*/
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- u16 status, gfp_t gfp);
+static inline void
+cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp)
+{
+ cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
+ resp_ie_len, status, gfp);
+}
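
So the old exported function becomes a thin wrapper: drivers that can identify the exact BSS entry call cfg80211_connect_bss() directly, everyone else keeps calling cfg80211_connect_result() unchanged. A sketch of the usual completion path (hyp_assoc_done is hypothetical):

        #include <linux/ieee80211.h>
        #include <net/cfg80211.h>

        static void hyp_assoc_done(struct net_device *ndev, const u8 *bssid,
                                   const u8 *req_ie, size_t req_ie_len,
                                   const u8 *resp_ie, size_t resp_ie_len, bool ok)
        {
                cfg80211_connect_result(ndev, bssid, req_ie, req_ie_len,
                                        resp_ie, resp_ie_len,
                                        ok ? WLAN_STATUS_SUCCESS
                                           : WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        GFP_KERNEL);
        }
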
/**
* cfg80211_roamed - notify cfg80211 of roaming
@@ -5029,7 +5107,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
* Returns %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_band(u8 operating_class,
- enum ieee80211_band *band);
+ enum nl80211_band *band);
/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
diff --git a/include/net/codel.h b/include/net/codel.h
index d168aca115cc..a6e428f80135 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -87,27 +87,6 @@ static inline codel_time_t codel_get_time(void)
((s32)((a) - (b)) >= 0))
#define codel_time_before_eq(a, b) codel_time_after_eq(b, a)
-/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
-struct codel_skb_cb {
- codel_time_t enqueue_time;
-};
-
-static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
-{
- qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
- return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
-static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
-{
- return get_codel_cb(skb)->enqueue_time;
-}
-
-static void codel_set_enqueue_time(struct sk_buff *skb)
-{
- get_codel_cb(skb)->enqueue_time = codel_get_time();
-}
-
static inline u32 codel_time_to_us(codel_time_t val)
{
u64 valns = ((u64)val << CODEL_SHIFT);
@@ -176,198 +155,10 @@ struct codel_stats {
#define CODEL_DISABLED_THRESHOLD INT_MAX
-static void codel_params_init(struct codel_params *params,
- const struct Qdisc *sch)
-{
- params->interval = MS2TIME(100);
- params->target = MS2TIME(5);
- params->mtu = psched_mtu(qdisc_dev(sch));
- params->ce_threshold = CODEL_DISABLED_THRESHOLD;
- params->ecn = false;
-}
-
-static void codel_vars_init(struct codel_vars *vars)
-{
- memset(vars, 0, sizeof(*vars));
-}
-
-static void codel_stats_init(struct codel_stats *stats)
-{
- stats->maxpacket = 0;
-}
-
-/*
- * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
- * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
- *
- * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
- */
-static void codel_Newton_step(struct codel_vars *vars)
-{
- u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
- u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
- u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
-
- val >>= 2; /* avoid overflow in following multiply */
- val = (val * invsqrt) >> (32 - 2 + 1);
-
- vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
-}
-
-/*
- * CoDel control_law is t + interval/sqrt(count)
- * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
- * both sqrt() and divide operation.
- */
-static codel_time_t codel_control_law(codel_time_t t,
- codel_time_t interval,
- u32 rec_inv_sqrt)
-{
- return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
-}
-
-static bool codel_should_drop(const struct sk_buff *skb,
- struct Qdisc *sch,
- struct codel_vars *vars,
- struct codel_params *params,
- struct codel_stats *stats,
- codel_time_t now)
-{
- bool ok_to_drop;
-
- if (!skb) {
- vars->first_above_time = 0;
- return false;
- }
-
- vars->ldelay = now - codel_get_enqueue_time(skb);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
-
- if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
- stats->maxpacket = qdisc_pkt_len(skb);
-
- if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= params->mtu) {
- /* went below - stay below for at least interval */
- vars->first_above_time = 0;
- return false;
- }
- ok_to_drop = false;
- if (vars->first_above_time == 0) {
- /* just went above from below. If we stay above
- * for at least interval we'll say it's ok to drop
- */
- vars->first_above_time = now + params->interval;
- } else if (codel_time_after(now, vars->first_above_time)) {
- ok_to_drop = true;
- }
- return ok_to_drop;
-}
-
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
- struct Qdisc *sch);
+ void *ctx);
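
The Qdisc-specific pieces of the old inline implementation become four callbacks plus an opaque ctx, so mac80211 can reuse the algorithm on its own queues. A sketch of the glue a classic qdisc might provide, built from the helpers visible in the removed code above (the hyp_ names and the enqueue-time accessor are hypothetical):

        static u32 hyp_skb_len(const struct sk_buff *skb)
        {
                return qdisc_pkt_len(skb);
        }

        static codel_time_t hyp_skb_time(const struct sk_buff *skb)
        {
                /* assumes the qdisc stamped skb->cb[] at enqueue time */
                return hyp_get_enqueue_time(skb);
        }

        static void hyp_drop(struct sk_buff *skb, void *ctx)
        {
                qdisc_drop(skb, ctx);
        }

        static struct sk_buff *hyp_dequeue(struct codel_vars *vars, void *ctx)
        {
                struct Qdisc *sch = ctx;

                return __skb_dequeue(&sch->q);
        }

        /*
         * ...and the dequeue path then calls (see codel_impl.h below):
         * skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
         *                     &q->stats, hyp_skb_len, hyp_skb_time, hyp_drop,
         *                     hyp_dequeue);
         */
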
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
- struct codel_params *params,
- struct codel_vars *vars,
- struct codel_stats *stats,
- codel_skb_dequeue_t dequeue_func)
-{
- struct sk_buff *skb = dequeue_func(vars, sch);
- codel_time_t now;
- bool drop;
-
- if (!skb) {
- vars->dropping = false;
- return skb;
- }
- now = codel_get_time();
- drop = codel_should_drop(skb, sch, vars, params, stats, now);
- if (vars->dropping) {
- if (!drop) {
- /* sojourn time below target - leave dropping state */
- vars->dropping = false;
- } else if (codel_time_after_eq(now, vars->drop_next)) {
- /* It's time for the next drop. Drop the current
- * packet and dequeue the next. The dequeue might
- * take us out of dropping state.
- * If not, schedule the next drop.
- * A large backlog might result in drop rates so high
- * that the next drop should happen now,
- * hence the while loop.
- */
- while (vars->dropping &&
- codel_time_after_eq(now, vars->drop_next)) {
- vars->count++; /* dont care of possible wrap
- * since there is no more divide
- */
- codel_Newton_step(vars);
- if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
- vars->drop_next =
- codel_control_law(vars->drop_next,
- params->interval,
- vars->rec_inv_sqrt);
- goto end;
- }
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
- stats->drop_count++;
- skb = dequeue_func(vars, sch);
- if (!codel_should_drop(skb, sch,
- vars, params, stats, now)) {
- /* leave dropping state */
- vars->dropping = false;
- } else {
- /* and schedule the next drop */
- vars->drop_next =
- codel_control_law(vars->drop_next,
- params->interval,
- vars->rec_inv_sqrt);
- }
- }
- }
- } else if (drop) {
- u32 delta;
-
- if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
- } else {
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
- stats->drop_count++;
-
- skb = dequeue_func(vars, sch);
- drop = codel_should_drop(skb, sch, vars, params,
- stats, now);
- }
- vars->dropping = true;
- /* if min went above target close to when we last went below it
- * assume that the drop rate that controlled the queue on the
- * last cycle is a good starting point to control it now.
- */
- delta = vars->count - vars->lastcount;
- if (delta > 1 &&
- codel_time_before(now - vars->drop_next,
- 16 * params->interval)) {
- vars->count = delta;
- /* we dont care if rec_inv_sqrt approximation
- * is not very precise :
- * Next Newton steps will correct it quadratically.
- */
- codel_Newton_step(vars);
- } else {
- vars->count = 1;
- vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
- }
- vars->lastcount = vars->count;
- vars->drop_next = codel_control_law(now, params->interval,
- vars->rec_inv_sqrt);
- }
-end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
- return skb;
-}
#endif
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
new file mode 100644
index 000000000000..d289b91dcd65
--- /dev/null
+++ b/include/net/codel_impl.h
@@ -0,0 +1,255 @@
+#ifndef __NET_SCHED_CODEL_IMPL_H
+#define __NET_SCHED_CODEL_IMPL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 0;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
+
+ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 rec_inv_sqrt)
+{
+ return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
+
+static bool codel_should_drop(const struct sk_buff *skb,
+ void *ctx,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ u32 *backlog,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+ u32 skb_len;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ skb_len = skb_len_func(skb);
+ vars->ldelay = now - skb_time_func(skb);
+
+ if (unlikely(skb_len > stats->maxpacket))
+ stats->maxpacket = skb_len;
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= params->mtu) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+static struct sk_buff *codel_dequeue(void *ctx,
+ u32 *backlog,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ codel_skb_drop_t drop_func,
+ codel_skb_dequeue_t dequeue_func)
+{
+ struct sk_buff *skb = dequeue_func(vars, ctx);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, ctx, vars, params, stats,
+ skb_len_func, skb_time_func, backlog, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ vars->count++; /* don't care about a possible wrap
+ * since there is no more divide
+ */
+ codel_Newton_step(vars);
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ goto end;
+ }
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+ skb = dequeue_func(vars, ctx);
+ if (!codel_should_drop(skb, ctx,
+ vars, params, stats,
+ skb_len_func,
+ skb_time_func,
+ backlog, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ }
+ }
+ }
+ } else if (drop) {
+ u32 delta;
+
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, ctx);
+ drop = codel_should_drop(skb, ctx, vars, params,
+ stats, skb_len_func,
+ skb_time_func, backlog, now);
+ }
+ vars->dropping = true;
+ /* if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = delta;
+ /* we don't care if the rec_inv_sqrt approximation
+ * is not very precise:
+ * the next Newton steps will correct it quadratically.
+ */
+ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
+ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->rec_inv_sqrt);
+ }
+end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+ INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ return skb;
+}
+
+#endif
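The Newton step and control law above work entirely in fixed point: rec_inv_sqrt approximates 1/sqrt(count) and the control law scales the interval by it. The stand-alone sketch below reproduces the same arithmetic in userspace C so the resulting drop spacing can be inspected; REC_INV_SQRT_SHIFT == 16 and the 16-bit storage of rec_inv_sqrt are assumptions taken from include/net/codel.h, and reciprocal_scale() is open-coded.

#include <stdint.h>
#include <stdio.h>

#define REC_INV_SQRT_SHIFT 16           /* assumed: 32 - REC_INV_SQRT_BITS */

static uint32_t rec_inv_sqrt = 0xFFFFu; /* ~1.0 in Q0.16, i.e. count == 1 */

static void newton_step(uint32_t count)
{
        uint32_t invsqrt = rec_inv_sqrt << REC_INV_SQRT_SHIFT;  /* Q0.32 */
        uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
        uint64_t val = (3ULL << 32) - ((uint64_t)count * invsqrt2);

        val >>= 2;                              /* avoid overflow below */
        val = (val * invsqrt) >> (32 - 2 + 1);
        /* the kernel keeps this in a 16-bit bitfield, hence the mask */
        rec_inv_sqrt = (val >> REC_INV_SQRT_SHIFT) & 0xFFFFu;
}

/* open-coded reciprocal_scale(): interval * rec_inv_sqrt / 2^32 */
static uint32_t drop_spacing(uint32_t interval_us)
{
        return ((uint64_t)interval_us *
                (rec_inv_sqrt << REC_INV_SQRT_SHIFT)) >> 32;
}

int main(void)
{
        uint32_t count = 1;     /* as when (re)entering the dropping state */

        printf("count=%u spacing=%u us\n", count, drop_spacing(100000));
        for (count = 2; count <= 8; count++) {
                newton_step(count);     /* one refinement per drop, as above */
                printf("count=%u spacing=%u us\n", count,
                       drop_spacing(100000));
        }
        return 0;
}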
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
new file mode 100644
index 000000000000..8144d9cd2908
--- /dev/null
+++ b/include/net/codel_qdisc.h
@@ -0,0 +1,73 @@
+#ifndef __NET_SCHED_CODEL_QDISC_H
+#define __NET_SCHED_CODEL_QDISC_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on Linux by Dave Taht and Eric Dumazet
+ */
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+#endif
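Taken together, codel_qdisc.h provides the cb[] time stamping and codel_impl.h the dequeue machinery; the includer supplies the glue. A hedged sketch of that wiring follows, loosely modelled on net/sched/sch_codel.c: the my_* names and struct my_sched_data (holding the codel params/vars/stats in the qdisc private area) are invented, while codel_set_enqueue_time(), codel_get_enqueue_time(), codel_dequeue() and the qdisc helpers are existing interfaces.

struct my_sched_data {                  /* hypothetical qdisc private data */
        struct codel_params params;
        struct codel_vars vars;
        struct codel_stats stats;
};

static void my_drop(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        qdisc_drop(skb, sch);
}

static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;

        return qdisc_dequeue_head(sch);
}

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        codel_set_enqueue_time(skb);    /* stamp arrival time in skb->cb[] */
        return qdisc_enqueue_tail(skb, sch);
}

static struct sk_buff *my_codel_dequeue(struct Qdisc *sch)
{
        struct my_sched_data *q = qdisc_priv(sch);

        return codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
                             &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
                             my_drop, my_dequeue);
}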
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c37d257891d6..1d45b61cb320 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -24,6 +24,7 @@ struct devlink_ops;
struct devlink {
struct list_head list;
struct list_head port_list;
+ struct list_head sb_list;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
@@ -42,6 +43,12 @@ struct devlink_port {
u32 split_group;
};
+struct devlink_sb_pool_info {
+ enum devlink_sb_pool_type pool_type;
+ u32 size;
+ enum devlink_sb_threshold_type threshold_type;
+};
+
struct devlink_ops {
size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -49,6 +56,40 @@ struct devlink_ops {
int (*port_split)(struct devlink *devlink, unsigned int port_index,
unsigned int count);
int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+ int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+ int (*sb_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+ int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+ int (*sb_occ_snapshot)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -82,6 +123,11 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_split_set(struct devlink_port *devlink_port,
u32 split_group);
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
#else
@@ -135,6 +181,21 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port,
{
}
+static inline int devlink_sb_register(struct devlink *devlink,
+ unsigned int sb_index, u32 size,
+ u16 ingress_pools_count,
+ u16 egress_pools_count,
+ u16 ingress_tc_count,
+ u16 egress_tc_count)
+{
+ return 0;
+}
+
+static inline void devlink_sb_unregister(struct devlink *devlink,
+ unsigned int sb_index)
+{
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
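A hedged sketch of how a driver is expected to use the new shared-buffer hooks: register the buffer once with devlink_sb_register() and answer pool queries through devlink_ops. The my_* names, sizes and pool/TC counts are invented; the DEVLINK_SB_* enum values are taken from the devlink UAPI.

static int my_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
                          u16 pool_index,
                          struct devlink_sb_pool_info *pool_info)
{
        /* invented layout: pools 0-3 ingress, 4-7 egress, 1 MB each */
        pool_info->pool_type = pool_index < 4 ? DEVLINK_SB_POOL_TYPE_INGRESS :
                                                DEVLINK_SB_POOL_TYPE_EGRESS;
        pool_info->size = 1 << 20;
        pool_info->threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC;
        return 0;
}

static const struct devlink_ops my_devlink_ops = {
        .sb_pool_get    = my_sb_pool_get,
        /* ... the remaining sb_* callbacks ... */
};

static int my_sb_init(struct devlink *devlink)
{
        /* shared buffer 0: total size, pool counts and TC counts */
        return devlink_sb_register(devlink, 0, 1 << 20, 4, 4, 8, 8);
}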
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6463bb2863ac..17c3d37b6779 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -16,7 +16,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ethtool.h>
@@ -65,13 +64,6 @@ struct dsa_chip_data {
* NULL if there is only one switch chip.
*/
s8 *rtable;
-
- /*
- * A switch may have a GPIO line tied to its reset pin. Parse
- * this from the device tree, and use it before performing
- * switch soft reset.
- */
- struct gpio_desc *reset;
};
struct dsa_platform_data {
@@ -111,6 +103,11 @@ struct dsa_switch_tree {
enum dsa_tag_protocol tag_protocol;
/*
+ * Original copy of the master netdev ethtool_ops
+ */
+ struct ethtool_ops master_ethtool_ops;
+
+ /*
* The switch and port to which the CPU is attached.
*/
s8 cpu_switch;
@@ -123,6 +120,8 @@ struct dsa_switch_tree {
};
struct dsa_switch {
+ struct device *dev;
+
/*
* Parent switch tree, and switch index.
*/
@@ -130,25 +129,21 @@ struct dsa_switch {
int index;
/*
- * Tagging protocol understood by this switch
+ * Give the switch driver somewhere to hang its private data
+ * structure.
*/
- enum dsa_tag_protocol tag_protocol;
+ void *priv;
/*
* Configuration data for this switch.
*/
- struct dsa_chip_data *pd;
+ struct dsa_chip_data *cd;
/*
* The used switch driver.
*/
struct dsa_switch_driver *drv;
- /*
- * Reference to host device to use.
- */
- struct device *master_dev;
-
#ifdef CONFIG_NET_DSA_HWMON
/*
* Hardware monitoring information
@@ -161,7 +156,7 @@ struct dsa_switch {
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
- u32 phys_port_mask;
+ u32 enabled_port_mask;
u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
struct net_device *ports[DSA_MAX_PORTS];
@@ -179,7 +174,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
- return ds->phys_port_mask & (1 << p) && ds->ports[p];
+ return ds->enabled_port_mask & (1 << p) && ds->ports[p];
}
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -195,7 +190,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
if (dst->cpu_switch == ds->index)
return dst->cpu_port;
else
- return ds->pd->rtable[dst->cpu_switch];
+ return ds->cd->rtable[dst->cpu_switch];
}
struct switchdev_trans;
@@ -207,12 +202,13 @@ struct dsa_switch_driver {
struct list_head list;
enum dsa_tag_protocol tag_protocol;
- int priv_size;
/*
* Probing and setup.
*/
- char *(*probe)(struct device *host_dev, int sw_addr);
+ const char *(*probe)(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv);
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -299,8 +295,8 @@ struct dsa_switch_driver {
int (*port_bridge_join)(struct dsa_switch *ds, int port,
struct net_device *bridge);
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
- int (*port_stp_update)(struct dsa_switch *ds, int port,
- u8 state);
+ void (*port_stp_state_set)(struct dsa_switch *ds, int port,
+ u8 state);
/*
* VLAN support
@@ -310,7 +306,7 @@ struct dsa_switch_driver {
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
- int (*port_vlan_add)(struct dsa_switch *ds, int port,
+ void (*port_vlan_add)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
@@ -325,7 +321,7 @@ struct dsa_switch_driver {
int (*port_fdb_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
- int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ void (*port_fdb_add)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
@@ -341,7 +337,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
static inline void *ds_to_priv(struct dsa_switch *ds)
{
- return (void *)(ds + 1);
+ return ds->priv;
}
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
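With priv now hanging off struct dsa_switch and probe() handing it back through **priv, a driver allocates its own state instead of relying on the removed priv_size. A hedged sketch of the new probe contract follows; struct my_switch_priv and the my_* functions are invented, the rest comes from this header.

struct my_switch_priv {
        struct mii_bus *bus;
        int sw_addr;
};

static const char *my_drv_probe(struct device *dsa_dev,
                                struct device *host_dev, int sw_addr,
                                void **priv)
{
        struct my_switch_priv *ps;

        ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return NULL;

        ps->bus = dsa_host_dev_to_mii_bus(host_dev);
        ps->sw_addr = sw_addr;
        *priv = ps;                     /* stored in ds->priv by the core */

        return "my-switch";             /* detected switch model name */
}

static int my_drv_setup(struct dsa_switch *ds)
{
        struct my_switch_priv *ps = ds_to_priv(ds);     /* now just ds->priv */

        return ps->bus ? 0 : -ENODEV;
}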
diff --git a/include/net/dst.h b/include/net/dst.h
index 5c98443c1c9e..6835d224d47b 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -85,12 +85,11 @@ struct dst_entry {
#endif
#ifdef CONFIG_64BIT
- struct lwtunnel_state *lwtstate;
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
- long __pad_to_align_refcnt[1];
+ long __pad_to_align_refcnt[2];
#endif
/*
* __refcnt wants to be on a different cache line from
@@ -99,9 +98,7 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
-#ifndef CONFIG_64BIT
struct lwtunnel_state *lwtstate;
-#endif
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
diff --git a/include/net/fou.h b/include/net/fou.h
index 19b8a0c62a98..f5cc6910a27e 100644
--- a/include/net/fou.h
+++ b/include/net/fou.h
@@ -9,11 +9,11 @@
#include <net/udp.h>
size_t fou_encap_hlen(struct ip_tunnel_encap *e);
-static size_t gue_encap_hlen(struct ip_tunnel_encap *e);
+size_t gue_encap_hlen(struct ip_tunnel_encap *e);
-int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
-int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
+int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
+int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
#endif
diff --git a/include/net/fq.h b/include/net/fq.h
new file mode 100644
index 000000000000..268b49049c37
--- /dev/null
+++ b/include/net/fq.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_H
+#define __NET_SCHED_FQ_H
+
+struct fq_tin;
+
+/**
+ * struct fq_flow - per traffic flow queue
+ *
+ * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
+ * hashes to an index which points to a flow that is already owned by a
+ * tin different from the one the packet is destined to. In such a case the
+ * implementer must provide a fallback flow
+ * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
+ * (deficit round robin) based round robin queuing similar to the one
+ * found in net/sched/sch_fq_codel.c
+ * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
+ * fat flows and efficient head-dropping if packet limit is reached
+ * @queue: sk_buff queue to hold packets
+ * @backlog: number of bytes pending in the queue. The number of packets can be
+ * found in @queue.qlen
+ * @deficit: used for DRR++
+ */
+struct fq_flow {
+ struct fq_tin *tin;
+ struct list_head flowchain;
+ struct list_head backlogchain;
+ struct sk_buff_head queue;
+ u32 backlog;
+ int deficit;
+};
+
+/**
+ * struct fq_tin - a logical container of fq_flows
+ *
+ * Used to group fq_flows into a logical aggregate. DRR++ scheme is used to
+ * pull interleaved packets out of the associated flows.
+ *
+ * @new_flows: linked list of fq_flow
+ * @old_flows: linked list of fq_flow
+ */
+struct fq_tin {
+ struct list_head new_flows;
+ struct list_head old_flows;
+ u32 backlog_bytes;
+ u32 backlog_packets;
+ u32 overlimit;
+ u32 collisions;
+ u32 flows;
+ u32 tx_bytes;
+ u32 tx_packets;
+};
+
+/**
+ * struct fq - main container for fair queuing purposes
+ *
+ * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
+ * head-dropping when @backlog reaches @limit
+ * @limit: max number of packets that can be queued across all flows
+ * @backlog: number of packets queued across all flows
+ */
+struct fq {
+ struct fq_flow *flows;
+ struct list_head backlogs;
+ spinlock_t lock;
+ u32 flows_cnt;
+ u32 perturbation;
+ u32 limit;
+ u32 quantum;
+ u32 backlog;
+ u32 overlimit;
+ u32 collisions;
+};
+
+typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *flow);
+
+typedef void fq_skb_free_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *,
+ struct sk_buff *);
+
+typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
+ struct fq_tin *,
+ int idx,
+ struct sk_buff *);
+
+#endif
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
new file mode 100644
index 000000000000..163f3ed0f05a
--- /dev/null
+++ b/include/net/fq_impl.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_IMPL_H
+#define __NET_SCHED_FQ_IMPL_H
+
+#include <net/fq.h>
+
+/* functions that are embedded into includer */
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+ struct fq_flow *flow)
+{
+ struct fq_tin *tin = flow->tin;
+ struct fq_flow *i;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ return NULL;
+
+ tin->backlog_bytes -= skb->len;
+ tin->backlog_packets--;
+ flow->backlog -= skb->len;
+ fq->backlog--;
+
+ if (flow->backlog == 0) {
+ list_del_init(&flow->backlogchain);
+ } else {
+ i = flow;
+
+ list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
+ if (i->backlog < flow->backlog)
+ break;
+
+ list_move_tail(&flow->backlogchain,
+ &i->backlogchain);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *fq_tin_dequeue(struct fq *fq,
+ struct fq_tin *tin,
+ fq_tin_dequeue_t dequeue_func)
+{
+ struct fq_flow *flow;
+ struct list_head *head;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+begin:
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += fq->quantum;
+ list_move_tail(&flow->flowchain,
+ &tin->old_flows);
+ goto begin;
+ }
+
+ skb = dequeue_func(fq, tin, flow);
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &tin->new_flows) &&
+ !list_empty(&tin->old_flows)) {
+ list_move_tail(&flow->flowchain, &tin->old_flows);
+ } else {
+ list_del_init(&flow->flowchain);
+ flow->tin = NULL;
+ }
+ goto begin;
+ }
+
+ flow->deficit -= skb->len;
+ tin->tx_bytes += skb->len;
+ tin->tx_packets++;
+
+ return skb;
+}
+
+static struct fq_flow *fq_flow_classify(struct fq *fq,
+ struct fq_tin *tin,
+ struct sk_buff *skb,
+ fq_flow_get_default_t get_default_func)
+{
+ struct fq_flow *flow;
+ u32 hash;
+ u32 idx;
+
+ lockdep_assert_held(&fq->lock);
+
+ hash = skb_get_hash_perturb(skb, fq->perturbation);
+ idx = reciprocal_scale(hash, fq->flows_cnt);
+ flow = &fq->flows[idx];
+
+ if (flow->tin && flow->tin != tin) {
+ flow = get_default_func(fq, tin, idx, skb);
+ tin->collisions++;
+ fq->collisions++;
+ }
+
+ if (!flow->tin)
+ tin->flows++;
+
+ return flow;
+}
+
+static void fq_recalc_backlog(struct fq *fq,
+ struct fq_tin *tin,
+ struct fq_flow *flow)
+{
+ struct fq_flow *i;
+
+ if (list_empty(&flow->backlogchain))
+ list_add_tail(&flow->backlogchain, &fq->backlogs);
+
+ i = flow;
+ list_for_each_entry_continue_reverse(i, &fq->backlogs,
+ backlogchain)
+ if (i->backlog > flow->backlog)
+ break;
+
+ list_move(&flow->backlogchain, &i->backlogchain);
+}
+
+static void fq_tin_enqueue(struct fq *fq,
+ struct fq_tin *tin,
+ struct sk_buff *skb,
+ fq_skb_free_t free_func,
+ fq_flow_get_default_t get_default_func)
+{
+ struct fq_flow *flow;
+
+ lockdep_assert_held(&fq->lock);
+
+ flow = fq_flow_classify(fq, tin, skb, get_default_func);
+
+ flow->tin = tin;
+ flow->backlog += skb->len;
+ tin->backlog_bytes += skb->len;
+ tin->backlog_packets++;
+ fq->backlog++;
+
+ fq_recalc_backlog(fq, tin, flow);
+
+ if (list_empty(&flow->flowchain)) {
+ flow->deficit = fq->quantum;
+ list_add_tail(&flow->flowchain,
+ &tin->new_flows);
+ }
+
+ __skb_queue_tail(&flow->queue, skb);
+
+ if (fq->backlog > fq->limit) {
+ flow = list_first_entry_or_null(&fq->backlogs,
+ struct fq_flow,
+ backlogchain);
+ if (!flow)
+ return;
+
+ skb = fq_flow_dequeue(fq, flow);
+ if (!skb)
+ return;
+
+ free_func(fq, flow->tin, flow, skb);
+
+ flow->tin->overlimit++;
+ fq->overlimit++;
+ }
+}
+
+static void fq_flow_reset(struct fq *fq,
+ struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_flow_dequeue(fq, flow)))
+ free_func(fq, flow->tin, flow, skb);
+
+ if (!list_empty(&flow->flowchain))
+ list_del_init(&flow->flowchain);
+
+ if (!list_empty(&flow->backlogchain))
+ list_del_init(&flow->backlogchain);
+
+ flow->tin = NULL;
+
+ WARN_ON_ONCE(flow->backlog);
+}
+
+static void fq_tin_reset(struct fq *fq,
+ struct fq_tin *tin,
+ fq_skb_free_t free_func)
+{
+ struct list_head *head;
+ struct fq_flow *flow;
+
+ for (;;) {
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ break;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+ fq_flow_reset(fq, flow, free_func);
+ }
+
+ WARN_ON_ONCE(tin->backlog_bytes);
+ WARN_ON_ONCE(tin->backlog_packets);
+}
+
+static void fq_flow_init(struct fq_flow *flow)
+{
+ INIT_LIST_HEAD(&flow->flowchain);
+ INIT_LIST_HEAD(&flow->backlogchain);
+ __skb_queue_head_init(&flow->queue);
+}
+
+static void fq_tin_init(struct fq_tin *tin)
+{
+ INIT_LIST_HEAD(&tin->new_flows);
+ INIT_LIST_HEAD(&tin->old_flows);
+}
+
+static int fq_init(struct fq *fq, int flows_cnt)
+{
+ int i;
+
+ memset(fq, 0, sizeof(fq[0]));
+ INIT_LIST_HEAD(&fq->backlogs);
+ spin_lock_init(&fq->lock);
+ fq->flows_cnt = max_t(u32, flows_cnt, 1);
+ fq->perturbation = prandom_u32();
+ fq->quantum = 300;
+ fq->limit = 8192;
+
+ fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+ if (!fq->flows)
+ return -ENOMEM;
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_init(&fq->flows[i]);
+
+ return 0;
+}
+
+static void fq_reset(struct fq *fq,
+ fq_skb_free_t free_func)
+{
+ int i;
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_reset(fq, &fq->flows[i], free_func);
+
+ kfree(fq->flows);
+ fq->flows = NULL;
+}
+
+#endif
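fq.h/fq_impl.h are written to be embedded into an includer that supplies the three callbacks. A hedged sketch of a minimal includer follows; struct my_dev and the my_* callbacks are invented, the flow count is illustrative, and a real user (mac80211 is the intended one) does considerably more in each hook.

#include <net/fq.h>
#include <net/fq_impl.h>

struct my_dev {
        struct fq fq;
        struct fq_tin tin;
        struct fq_flow fallback;        /* used on cross-tin hash collisions */
};

static struct fq_flow *my_flow_get_default(struct fq *fq, struct fq_tin *tin,
                                           int idx, struct sk_buff *skb)
{
        struct my_dev *dev = container_of(fq, struct my_dev, fq);

        return &dev->fallback;
}

static struct sk_buff *my_tin_dequeue(struct fq *fq, struct fq_tin *tin,
                                      struct fq_flow *flow)
{
        return fq_flow_dequeue(fq, flow);       /* no per-flow AQM here */
}

static void my_skb_free(struct fq *fq, struct fq_tin *tin,
                        struct fq_flow *flow, struct sk_buff *skb)
{
        dev_kfree_skb(skb);             /* over-limit drop or reset */
}

static int my_dev_init(struct my_dev *dev)
{
        int err = fq_init(&dev->fq, 1024);      /* 1024 flows: illustrative */

        if (err)
                return err;
        fq_tin_init(&dev->tin);
        fq_flow_init(&dev->fallback);
        return 0;
}

static void my_xmit(struct my_dev *dev, struct sk_buff *skb)
{
        spin_lock_bh(&dev->fq.lock);    /* fq_tin_enqueue() asserts fq->lock */
        fq_tin_enqueue(&dev->fq, &dev->tin, skb, my_skb_free,
                       my_flow_get_default);
        spin_unlock_bh(&dev->fq.lock);
}

static struct sk_buff *my_dequeue(struct my_dev *dev)
{
        struct sk_buff *skb;

        spin_lock_bh(&dev->fq.lock);
        skb = fq_tin_dequeue(&dev->fq, &dev->tin, my_tin_dequeue);
        spin_unlock_bh(&dev->fq.lock);
        return skb;
}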
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index cbafa3768d48..610cd397890e 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -19,17 +19,19 @@ struct gnet_dump {
/* Backward compatibility */
int compat_tc_stats;
int compat_xstats;
+ int padattr;
void * xstats;
int xstats_len;
struct tc_stats tc_stats;
};
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
- struct gnet_dump *d);
+ struct gnet_dump *d, int padattr);
int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
int tc_stats_type, int xstats_type,
- spinlock_t *lock, struct gnet_dump *d);
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
int gnet_stats_copy_basic(struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
diff --git a/include/net/geneve.h b/include/net/geneve.h
index e6c23dc765f7..cb544a530146 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,13 +62,11 @@ struct genevehdr {
struct geneve_opt options[];
};
-#if IS_ENABLED(CONFIG_GENEVE)
-void geneve_get_rx_port(struct net_device *netdev);
-#else
static inline void geneve_get_rx_port(struct net_device *netdev)
{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
}
-#endif
#ifdef CONFIG_INET
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
diff --git a/include/net/gre.h b/include/net/gre.h
index 97eafdc47eea..5dce30a6abe3 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -25,4 +25,108 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ bool *csum_err, __be16 proto);
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags & TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags & TUNNEL_KEY)
+ addend += 4;
+ if (o_flags & TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
+static inline __sum16 gre_checksum(struct sk_buff *skb)
+{
+ __wsum csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ csum = lco_csum(skb);
+ else
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ return csum_fold(csum);
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+ __be16 flags, __be16 proto,
+ __be32 key, __be32 seq)
+{
+ struct gre_base_hdr *greh;
+
+ skb_push(skb, hdr_len);
+
+ skb_reset_transport_header(skb);
+ greh = (struct gre_base_hdr *)skb->data;
+ greh->flags = gre_tnl_flags_to_gre_flags(flags);
+ greh->protocol = proto;
+
+ if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ if (flags & TUNNEL_SEQ) {
+ *ptr = seq;
+ ptr--;
+ }
+ if (flags & TUNNEL_KEY) {
+ *ptr = key;
+ ptr--;
+ }
+ if (flags & TUNNEL_CSUM &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+ *ptr = 0;
+ *(__sum16 *)ptr = gre_checksum(skb);
+ }
+ }
+}
+
#endif
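A short worked example of the two helpers above: with TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ set, gre_calc_hlen() returns 4 + 4 + 4 + 4 = 16 bytes, and gre_build_header() fills the optional words backwards from the end of that area (seq last, then key, then the checksum word, which is computed over the whole pushed header plus payload). The sketch below is hypothetical glue, not the in-tree transmit path; proto/key/seqno stand in for the tunnel's own state and the caller is assumed to have reserved headroom (e.g. with skb_cow_head()).

static void my_gre_push(struct sk_buff *skb, __be16 proto,
                        __be32 key, __be32 seqno)
{
        __be16 flags = TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ;

        /* 4 byte base header + 4 (csum) + 4 (key) + 4 (seq) = 16 bytes */
        gre_build_header(skb, gre_calc_hlen(flags), flags, proto, key, seqno);
}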
diff --git a/include/net/gtp.h b/include/net/gtp.h
new file mode 100644
index 000000000000..894a37b87d63
--- /dev/null
+++ b/include/net/gtp.h
@@ -0,0 +1,34 @@
+#ifndef _GTP_H_
+#define _GTP_H_
+
+/* General GTP protocol related definitions. */
+
+#define GTP0_PORT 3386
+#define GTP1U_PORT 2152
+
+#define GTP_TPDU 255
+
+struct gtp0_header { /* According to GSM TS 09.60. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be16 seq;
+ __be16 flow;
+ __u8 number;
+ __u8 spare[3];
+ __be64 tid;
+} __attribute__ ((packed));
+
+struct gtp1_header { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+} __attribute__ ((packed));
+
+#define GTP1_F_NPDU 0x01
+#define GTP1_F_SEQ 0x02
+#define GTP1_F_EXTHDR 0x04
+#define GTP1_F_MASK 0x07
+
+#endif
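One practical consequence of the GTPv1 flag bits above (per 3GPP TS 29.060, as I read it): whenever any of the S, PN or E flags is set, the fixed 8-byte header is followed by a 4-byte optional block (sequence number, N-PDU number, next-extension-header type). A hedged sketch of a receiver sizing the header accordingly; the function name is invented and extension headers themselves are not walked here.

static unsigned int gtp1_hdr_len(const struct gtp1_header *gtp1)
{
        unsigned int len = sizeof(*gtp1);       /* fixed 8-byte header */

        if (gtp1->flags & GTP1_F_MASK)          /* any of S, PN, E set */
                len += 4;                       /* seq + N-PDU + next ext */
        return len;
}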
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 970028e13382..3ef2743a8eec 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -30,9 +30,9 @@ struct icmp_err {
extern const struct icmp_err icmp_err_convert[];
#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.icmp_statistics, field)
#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
struct dst_entry;
struct net_proto_family;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 28332bdac333..b87becacd9d3 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -66,13 +66,15 @@ static inline struct sock *__inet6_lookup(struct net *net,
const __be16 sport,
const struct in6_addr *daddr,
const u16 hnum,
- const int dif)
+ const int dif,
+ bool *refcounted)
{
struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
sport, daddr, hnum, dif);
+ *refcounted = true;
if (sk)
return sk;
-
+ *refcounted = false;
return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
daddr, hnum, dif);
}
@@ -81,17 +83,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
- int iif)
+ int iif,
+ bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
+ *refcounted = true;
if (sk)
return sk;
return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
doff, &ipv6_hdr(skb)->saddr, sport,
&ipv6_hdr(skb)->daddr, ntohs(dport),
- iif);
+ iif, refcounted);
}
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 109e3ee9108c..5d683428fced 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -39,6 +39,11 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
+struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int inet_gro_complete(struct sk_buff *skb, int nhoff);
+struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
if (sk)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 50f635c2c536..0574493e3899 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -100,14 +100,10 @@ struct inet_bind_hashbucket {
/*
* Sockets can be hashed in established or listening table
- * We must use different 'nulls' end-of-chain value for listening
- * hash table, or we might find a socket that was closed and
- * reallocated/inserted into established hash table
*/
-#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- struct hlist_nulls_head head;
+ struct hlist_head head;
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
@@ -280,11 +276,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
-/*
- * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
+/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
- *
- * Local BH must be disabled here.
*/
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -307,14 +300,20 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
- const int dif)
+ const int dif,
+ bool *refcounted)
{
u16 hnum = ntohs(dport);
- struct sock *sk = __inet_lookup_established(net, hashinfo,
- saddr, sport, daddr, hnum, dif);
+ struct sock *sk;
- return sk ? : __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
- sport, daddr, hnum, dif);
+ sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+ daddr, hnum, dif);
+ *refcounted = true;
+ if (sk)
+ return sk;
+ *refcounted = false;
+ return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ sport, daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct net *net,
@@ -325,12 +324,13 @@ static inline struct sock *inet_lookup(struct net *net,
const int dif)
{
struct sock *sk;
+ bool refcounted;
- local_bh_disable();
sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
- dport, dif);
- local_bh_enable();
+ dport, dif, &refcounted);
+ if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
return sk;
}
@@ -338,17 +338,20 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
int doff,
const __be16 sport,
- const __be16 dport)
+ const __be16 dport,
+ bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
const struct iphdr *iph = ip_hdr(skb);
+ *refcounted = true;
if (sk)
return sk;
- else
- return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
- doff, iph->saddr, sport,
- iph->daddr, dport, inet_iif(skb));
+
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ refcounted);
}
u32 sk_ehashfn(const struct sock *sk);
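The new bool *refcounted out-parameter tells the caller whether the returned socket came from the established table (reference taken) or from the now RCU-protected listener path (no reference). A hedged fragment showing the caller-side pattern, roughly the shape of tcp_v4_rcv(); th is the TCP header and the label is a placeholder.

        bool refcounted;
        struct sock *sk;

        sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->doff * 4,
                               th->source, th->dest, &refcounted);
        if (!sk)
                goto no_socket;

        /* ... process the segment ... */

        if (refcounted)
                sock_put(sk);           /* only if the lookup took a ref */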
diff --git a/include/net/ip.h b/include/net/ip.h
index fad74d323bd6..37165fba3741 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -36,6 +36,7 @@
struct sock;
struct inet_skb_parm {
+ int iif;
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
@@ -56,6 +57,7 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
}
struct ipcm_cookie {
+ struct sockcm_cookie sockc;
__be32 addr;
int oif;
struct ip_options_rcu *opt;
@@ -186,17 +188,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
unsigned int len);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
@@ -550,7 +550,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
-int ip_cmsg_send(struct net *net, struct msghdr *msg,
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 499a707765ea..43a5a0e4524c 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -42,6 +42,7 @@ struct ip6_tnl {
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
struct dst_cache dst_cache; /* cached dst */
+ struct gro_cells gro_cells;
int err_count;
unsigned long err_time;
@@ -49,10 +50,72 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated GRE header length */
+ int hlen; /* tun_hlen + encap_hlen */
+ int tun_hlen; /* Precalculated header length */
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
int mlink;
};
+struct ip6_tnl_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6);
+};
+
+#ifdef CONFIG_INET
+
+extern const struct ip6_tnl_encap_ops __rcu *
+ ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_setup(struct ip6_tnl *t,
+ struct ip_tunnel_encap *ipencap);
+
+static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
+ u8 *protocol, struct flowi6 *fl6)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl6);
+ rcu_read_unlock();
+
+ return ret;
+}
+
/* Tunnel encapsulation limit destination sub-option */
struct ipv6_tlv_tnl_enc_lim {
@@ -63,15 +126,20 @@ struct ipv6_tlv_tnl_enc_lim {
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
-#ifdef CONFIG_INET
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
struct net_device *dev)
{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 56050f913339..dbf444428437 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -105,24 +105,23 @@ struct ip_tunnel {
struct net_device *dev;
struct net *net; /* netns for packet i/o */
- int err_count; /* Number of arrived ICMP errors */
unsigned long err_time; /* Time when the last ICMP error
* arrived */
+ int err_count; /* Number of arrived ICMP errors */
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
- int mlink;
struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
+ int mlink;
int encap_hlen; /* Encap header length (FOU,GUE) */
- struct ip_tunnel_encap encap;
-
int hlen; /* tun_hlen + encap_hlen */
+ struct ip_tunnel_encap encap;
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
@@ -161,6 +160,7 @@ struct tnl_ptk_info {
#define PACKET_RCVD 0
#define PACKET_REJECT 1
+#define PACKET_NEXT 2
#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
@@ -171,22 +171,6 @@ struct ip_tunnel_net {
struct ip_tunnel __rcu *collect_md_tun;
};
-struct ip_tunnel_encap_ops {
- size_t (*encap_hlen)(struct ip_tunnel_encap *e);
- int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
-};
-
-#define MAX_IPTUN_ENCAP_OPS 8
-
-extern const struct ip_tunnel_encap_ops __rcu *
- iptun_encaps[MAX_IPTUN_ENCAP_OPS];
-
-int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
- unsigned int num);
-int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
- unsigned int num);
-
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
u8 tos, u8 ttl, __be32 label,
@@ -251,8 +235,6 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
-int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
- u8 *protocol, struct flowi4 *fl4);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
@@ -271,9 +253,67 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
+
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+};
+
+#define MAX_IPTUN_ENCAP_OPS 8
+
+extern const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
+static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl4);
+ rcu_read_unlock();
+
+ return ret;
+}
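A hedged sketch of the provider side of this table: an encapsulation module (FOU/GUE are the in-tree examples) fills an ip_tunnel_encap_ops and registers it in one of the MAX_IPTUN_ENCAP_OPS slots. The my_* names are invented and TUNNEL_ENCAP_FOU is used purely as an example slot constant from the tunnel UAPI.

static size_t my_encap_hlen(struct ip_tunnel_encap *e)
{
        return sizeof(struct udphdr);   /* extra bytes this encap adds */
}

static int my_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
                           u8 *protocol, struct flowi4 *fl4)
{
        /* push the encap header here and switch *protocol accordingly */
        return 0;
}

static const struct ip_tunnel_encap_ops my_encap_ops = {
        .encap_hlen     = my_encap_hlen,
        .build_header   = my_build_header,
};

static int __init my_encap_init(void)
{
        return ip_tunnel_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
}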
+
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
@@ -295,15 +335,22 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
return INET_ECN_encapsulate(tos, inner);
}
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
- bool xnet);
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool raw_proto, bool xnet);
+
+static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool xnet)
+{
+ return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
+}
+
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a6cc576fd467..af4c10ebb241 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -731,6 +731,12 @@ struct ip_vs_pe {
u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
bool inverse);
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
+ /* create connections for real-server outgoing packets */
+ struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport, __be16 cport);
};
/* The application module object (a.k.a. app incarnation) */
@@ -874,6 +880,7 @@ struct netns_ipvs {
/* Service counters */
atomic_t ftpsvc_counter;
atomic_t nullsvc_counter;
+ atomic_t conn_out_counter;
#ifdef CONFIG_SYSCTL
/* 1/rate drop and drop-entry variables */
@@ -1147,6 +1154,12 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
*/
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport,
+ __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
@@ -1378,6 +1391,10 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
+struct ip_vs_dest *
+ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+
int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 1be050ada8c5..11a045281948 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,21 +121,21 @@ struct frag_hdr {
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;
-#define _DEVINC(net, statname, modifier, idev, field) \
+#define _DEVINC(net, statname, mod, idev, field) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
- SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+ mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+ mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})
/* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field) \
+#define _DEVINCATOMIC(net, statname, mod, idev, field) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
- SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+ mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})
/* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@ extern int sysctl_mld_qrv;
SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})
-#define _DEVADD(net, statname, modifier, idev, field, val) \
+#define _DEVADD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
- SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+ mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})
-#define _DEVUPD(net, statname, modifier, idev, field, val) \
+#define _DEVUPD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
- SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})
/* MIBs */
#define IP6_INC_STATS(net, idev,field) \
- _DEVINC(net, ipv6, 64, idev, field)
-#define IP6_INC_STATS_BH(net, idev,field) \
- _DEVINC(net, ipv6, 64_BH, idev, field)
+ _DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field) \
+ _DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev,field,val) \
- _DEVADD(net, ipv6, 64, idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val) \
- _DEVADD(net, ipv6, 64_BH, idev, field, val)
+ _DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val) \
+ _DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val) \
- _DEVUPD(net, ipv6, 64, idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
- _DEVUPD(net, ipv6, 64_BH, idev, field, val)
+ _DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val) \
+ _DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
_DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field) \
- _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+#define __ICMP6_INC_STATS(net, idev, field) \
+ _DEVINCATOMIC(net, icmpv6, __, idev, field)
#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
- _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
+#define ICMP6MSGIN_INC_STATS(net, idev, field) \
_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
struct ip6_ra_chain {
@@ -253,6 +251,13 @@ struct ipv6_fl_socklist {
struct rcu_head rcu;
};
+struct ipcm6_cookie {
+ __s16 hlimit;
+ __s16 tclass;
+ __s8 dontfrag;
+ struct ipv6_txoptions *opt;
+};
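The new cookie gathers the per-message IPv6 parameters that used to be passed individually to ip6_append_data()/ip6_make_skb(). A hedged fragment of a sendmsg() path filling it; -1 is the conventional "use the socket default" value for each field, and the surrounding variables (getfrag, msg, len, transhdrlen, fl6, rt, sockc) are assumed to come from the caller as before.

        struct ipcm6_cookie ipc6;

        ipc6.hlimit = -1;               /* use socket defaults */
        ipc6.tclass = -1;
        ipc6.dontfrag = -1;
        ipc6.opt = NULL;

        err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
                              &ipc6, &fl6, rt, MSG_DONTWAIT, &sockc);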
+
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;
@@ -865,9 +870,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
int ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen, int hlimit,
- int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags, int dontfrag);
+ void *from, int length, int transhdrlen,
+ struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags,
+ const struct sockcm_cookie *sockc);
int ip6_push_pending_frames(struct sock *sk);
@@ -882,9 +888,9 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
- int hlimit, int tclass, struct ipv6_txoptions *opt,
- struct flowi6 *fl6, struct rt6_info *rt,
- unsigned int flags, int dontfrag);
+ struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags,
+ const struct sockcm_cookie *sockc);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index c43a9c73de5e..374388dc01c8 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -25,6 +25,8 @@
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
+ struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
+ struct sk_buff *skb, u16 proto);
/* IPv4 ops */
struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
@@ -130,51 +132,36 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
return rc;
}
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
-{
- struct net_device *dev;
- int rc = 0;
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
- if (ifindex) {
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6);
- rcu_read_lock();
+static inline
+struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
+{
+ struct net_device *master = NULL;
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr) {
- rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
- }
+ if (netif_is_l3_slave(skb->dev))
+ master = netdev_master_upper_dev_get_rcu(skb->dev);
+ else if (netif_is_l3_master(skb->dev))
+ master = skb->dev;
- rcu_read_unlock();
- }
+ if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+ skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
- return rc;
+ return skb;
}
-static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
- return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
-
- return NULL;
+ return l3mdev_l3_rcv(skb, AF_INET);
}
static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
- const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- struct dst_entry *dst = NULL;
- struct net_device *dev;
-
- dev = dev_get_by_index(net, fl6->flowi6_oif);
- if (dev) {
- dst = l3mdev_get_rt6_dst(dev, fl6);
- dev_put(dev);
- }
-
- return dst;
+ return l3mdev_l3_rcv(skb, AF_INET6);
}
#else
@@ -233,16 +220,21 @@ static inline int l3mdev_get_saddr(struct net *net, int ifindex,
}
static inline
-struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6)
{
return NULL;
}
+
static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
- const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+ return skb;
}
#endif
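The new l3mdev receive hooks replace the per-caller rtable/dst lookups above with a single skb hand-off. Below is a minimal sketch of how an IPv4 input path might use the hook; ip_rcv_finish_sketch() and its call sites are hypothetical, only l3mdev_ip_rcv() comes from this header.

/* Sketch only, not the in-tree implementation: give an L3 master device
 * (e.g. a VRF) a chance to inspect or take over the packet before normal
 * input processing. A NULL return means the l3mdev driver consumed it.
 */
static int ip_rcv_finish_sketch(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	return dst_input(skb);
}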
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e385eb3076a1..be30b0549b88 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -291,7 +291,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
* @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
* @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
- * changed (currently only in P2P client mode, GO mode will be later)
+ * changed
* @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
* currently dtim_period only is under consideration.
* @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
@@ -526,6 +526,9 @@ struct ieee80211_mu_group_data {
* userspace), whereas TPC is disabled if %txpower_type is set to
* NL80211_TX_POWER_FIXED (use value configured from userspace)
* @p2p_noa_attr: P2P NoA attribute for P2P powersave
+ * @allow_p2p_go_ps: indicates whether an AP or P2P GO interface is allowed
+ *	to use the P2P PS mechanism. An AP/P2P GO must not use P2P PS if it
+ *	has associated clients without P2P PS support.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -546,7 +549,7 @@ struct ieee80211_bss_conf {
u8 sync_dtim_count;
u32 basic_rates;
struct ieee80211_rate *beacon_rate;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
u16 ht_operation_mode;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
@@ -563,6 +566,7 @@ struct ieee80211_bss_conf {
int txpower;
enum nl80211_tx_power_setting txpower_type;
struct ieee80211_p2p_noa_attr p2p_noa_attr;
+ bool allow_p2p_go_ps;
};
/**
@@ -709,6 +713,7 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
* frame (PS-Poll or uAPSD).
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
+ * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
*
* These flags are used in tx_info->control.flags.
*/
@@ -716,6 +721,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
+ IEEE80211_TX_CTRL_AMSDU = BIT(3),
};
/*
@@ -932,8 +938,8 @@ struct ieee80211_tx_info {
* @common_ie_len: length of the common_ies
*/
struct ieee80211_scan_ies {
- const u8 *ies[IEEE80211_NUM_BANDS];
- size_t len[IEEE80211_NUM_BANDS];
+ const u8 *ies[NUM_NL80211_BANDS];
+ size_t len[NUM_NL80211_BANDS];
const u8 *common_ies;
size_t common_ie_len;
};
@@ -1036,6 +1042,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* on this subframe
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_MIC_STRIPPED: The MIC was stripped off this packet. Decryption was
+ *	done by the hardware.
* @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
* processing it in any regular way.
@@ -1060,6 +1068,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
* radiotap data in the skb->data (before the frame) as described by
* the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as the previous packet.
+ *	This is used for A-MSDU subframes, which can have the same PN as
+ *	the first subframe.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1093,6 +1104,8 @@ enum mac80211_rx_flags {
RX_FLAG_5MHZ = BIT(29),
RX_FLAG_AMSDU_MORE = BIT(30),
RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
+ RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
+ RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -1122,6 +1135,8 @@ enum mac80211_rx_vht_flags {
*
* @mactime: value in microseconds of the 64-bit Time Synchronization Function
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at; this is
+ *	needed only for beacons and probe responses that update the scan cache.
* @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
@@ -1148,9 +1163,10 @@ enum mac80211_rx_vht_flags {
*/
struct ieee80211_rx_status {
u64 mactime;
+ u64 boottime_ns;
u32 device_timestamp;
u32 ampdu_reference;
- u32 flag;
+ u64 flag;
u16 freq;
u8 vht_flag;
u8 rate_idx;
@@ -1737,10 +1753,12 @@ struct ieee80211_sta_rates {
* size is min(max_amsdu_len, 7935) bytes.
* Both additional HT limits must be enforced by the low level driver.
* This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
+ * @support_p2p_ps: indicates whether the STA supports the P2P PS mechanism.
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
- u32 supp_rates[IEEE80211_NUM_BANDS];
+ u32 supp_rates[NUM_NL80211_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
@@ -1757,6 +1775,8 @@ struct ieee80211_sta {
bool mfp;
u8 max_amsdu_subframes;
u16 max_amsdu_len;
+ bool support_p2p_ps;
+ u16 max_rc_amsdu_len;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1970,6 +1990,18 @@ struct ieee80211_txq {
* order and does not need to manage its own reorder buffer or BA session
* timeout.
*
+ * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX,
+ * which implies using per-CPU station statistics.
+ *
+ * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated
+ * A-MSDU frames. Requires software tx queueing and fast-xmit support.
+ * When not using minstrel/minstrel_ht rate control, the driver must
+ * limit the maximum A-MSDU size based on the current tx rate by setting
+ * max_rc_amsdu_len in struct ieee80211_sta.
+ *
+ * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
+ * skbs, needed for zero-copy software A-MSDU.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2007,6 +2039,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_BEACON_TX_STATUS,
IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
+ IEEE80211_HW_USES_RSS,
+ IEEE80211_HW_TX_AMSDU,
+ IEEE80211_HW_TX_FRAG_LIST,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
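A driver opting into the new capabilities would set these flags before registering the hardware; a hedged sketch (the fragment limit value is made up for illustration):

	/* Sketch: advertise software A-MSDU support at registration time. */
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	hw->max_tx_fragments = 16;	/* hypothetical per-A-MSDU buffer limit */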
@@ -2079,6 +2114,9 @@ enum ieee80211_hw_flags {
* size is smaller (an example is LinkSys WRT120N with FW v1.0.07
* build 002 Jun 18 2012).
*
+ * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
+ * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
+ *
* @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
* (if %IEEE80211_HW_QUEUE_CONTROL is set)
*
@@ -2133,6 +2171,7 @@ struct ieee80211_hw {
u8 max_rate_tries;
u8 max_rx_aggregation_subframes;
u8 max_tx_aggregation_subframes;
+ u8 max_tx_fragments;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
@@ -3350,6 +3389,10 @@ enum ieee80211_reconfig_type {
* the function call.
*
* @wake_tx_queue: Called when new packets have been added to the queue.
 * @sync_rx_queues: Process all pending frames in RSS queues. This is a
 *	synchronization point that is needed when the driver still has frames
 *	in its RSS queues that were received prior to the control path action
 *	currently being taken (e.g. disassociation) and are not processed yet.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3587,6 +3630,7 @@ struct ieee80211_ops {
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
+ void (*sync_rx_queues)(struct ieee80211_hw *hw);
};
/**
@@ -3840,11 +3884,12 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
* This function must be called with BHs disabled.
*
* @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
* @skb: the buffer to receive, owned by mac80211 after this call
* @napi: the NAPI context
*/
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct napi_struct *napi);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct napi_struct *napi);
/**
* ieee80211_rx - receive frame
@@ -3868,7 +3913,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- ieee80211_rx_napi(hw, skb, NULL);
+ ieee80211_rx_napi(hw, NULL, skb, NULL);
}
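With the extra @sta argument, a driver that has already resolved the transmitter (for example from an RSS RX descriptor) can pass it in directly; a minimal, hypothetical call site:

/* Sketch: hand a received frame to mac80211 together with the station it
 * came from (may be NULL if unknown) and the NAPI context.
 */
static void drv_rx_one(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	ieee80211_rx_napi(hw, sta, skb, napi);
}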
/**
@@ -3951,6 +3996,33 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
return ret;
}
+/**
+ * ieee80211_sta_pspoll - PS-Poll frame received
+ * @sta: currently connected station
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a PS-Poll frame from a
+ * connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must
+ * be serialized.
+ */
+void ieee80211_sta_pspoll(struct ieee80211_sta *sta);
+
+/**
+ * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received
+ * @sta: currently connected station
+ * @tid: TID of the received (potential) trigger frame
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a (potential) trigger frame
+ * from a connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_pspoll(); calls to all three must be
+ * serialized.
+ */
+void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
+
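For a driver using %IEEE80211_HW_AP_LINK_PS, the two helpers above would typically be called from the RX path once the frame type is known; a hedged sketch (the surrounding classification logic is assumed, not part of this header):

/* Sketch: report PS-Poll and (potential) U-APSD trigger frames to mac80211.
 * Calls must be serialized with ieee80211_sta_ps_transition(), as noted above.
 */
static void drv_report_ps_frames(struct ieee80211_sta *sta,
				 struct ieee80211_hdr *hdr, u8 tid)
{
	if (ieee80211_is_pspoll(hdr->frame_control))
		ieee80211_sta_pspoll(sta);
	else if (ieee80211_is_data_qos(hdr->frame_control))
		ieee80211_sta_uapsd_trigger(sta, tid);
}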
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
@@ -4363,7 +4435,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
*/
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
size_t frame_len,
struct ieee80211_rate *rate);
@@ -5316,7 +5388,7 @@ struct rate_control_ops {
};
static inline int rate_supported(struct ieee80211_sta *sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
int index)
{
return (sta == NULL || sta->supp_rates[band] & BIT(index));
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 6cd7a70706a9..e465c8551ac3 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -288,6 +288,16 @@ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
}
/**
+ * ieee802154_be16_to_le16 - copies and converts be16 to le16
+ * @le16_dst: le16 destination pointer
+ * @be16_src: be16 source pointer
+ */
+static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src)
+{
+ put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst);
+}
+
+/**
* ieee802154_alloc_hw - Allocate a new hardware device
*
* This must be called once for each hardware device. The returned pointer
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index fde4068eec0b..dd78bea227c8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -289,8 +289,6 @@ struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
-extern unsigned int nf_conntrack_hash_rnd;
-void init_nf_conntrack_hash_rnd(void);
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 62e17d1319ff..3e2f3328945c 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -81,6 +81,7 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
#define CONNTRACK_LOCKS 1024
+extern struct hlist_nulls_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 57c880378443..fa36447371c6 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -73,6 +73,8 @@ void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *nb);
void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ u32 portid, int report);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -91,69 +93,25 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
}
static inline int
-nf_conntrack_eventmask_report(unsigned int eventmask,
- struct nf_conn *ct,
- u32 portid,
- int report)
-{
- int ret = 0;
- struct net *net = nf_ct_net(ct);
- struct nf_ct_event_notifier *notify;
- struct nf_conntrack_ecache *e;
-
- rcu_read_lock();
- notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
- if (notify == NULL)
- goto out_unlock;
-
- e = nf_ct_ecache_find(ct);
- if (e == NULL)
- goto out_unlock;
-
- if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
- struct nf_ct_event item = {
- .ct = ct,
- .portid = e->portid ? e->portid : portid,
- .report = report
- };
- /* This is a resent of a destroy event? If so, skip missed */
- unsigned long missed = e->portid ? 0 : e->missed;
-
- if (!((eventmask | missed) & e->ctmask))
- goto out_unlock;
-
- ret = notify->fcn(eventmask | missed, &item);
- if (unlikely(ret < 0 || missed)) {
- spin_lock_bh(&ct->lock);
- if (ret < 0) {
- /* This is a destroy event that has been
- * triggered by a process, we store the PORTID
- * to include it in the retransmission. */
- if (eventmask & (1 << IPCT_DESTROY) &&
- e->portid == 0 && portid != 0)
- e->portid = portid;
- else
- e->missed |= eventmask;
- } else
- e->missed &= ~missed;
- spin_unlock_bh(&ct->lock);
- }
- }
-out_unlock:
- rcu_read_unlock();
- return ret;
-}
-
-static inline int
nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
+ const struct net *net = nf_ct_net(ct);
+
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ return 0;
+
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ const struct net *net = nf_ct_net(ct);
+
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ return 0;
+
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
}
@@ -172,43 +130,9 @@ int nf_ct_expect_register_notifier(struct net *net,
void nf_ct_expect_unregister_notifier(struct net *net,
struct nf_exp_event_notifier *nb);
-static inline void
-nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp,
- u32 portid,
- int report)
-{
- struct net *net = nf_ct_exp_net(exp);
- struct nf_exp_event_notifier *notify;
- struct nf_conntrack_ecache *e;
-
- rcu_read_lock();
- notify = rcu_dereference(net->ct.nf_expect_event_cb);
- if (notify == NULL)
- goto out_unlock;
-
- e = nf_ct_ecache_find(exp->master);
- if (e == NULL)
- goto out_unlock;
-
- if (e->expmask & (1 << event)) {
- struct nf_exp_event item = {
- .exp = exp,
- .portid = portid,
- .report = report
- };
- notify->fcn(1 << event, &item);
- }
-out_unlock:
- rcu_read_unlock();
-}
-
-static inline void
-nf_ct_expect_event(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp)
-{
- nf_ct_expect_event_report(event, exp, 0, 0);
-}
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+ struct nf_conntrack_expect *exp,
+ u32 portid, int report);
int nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
@@ -245,8 +169,6 @@ static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
u32 portid,
int report) { return 0; }
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp) {}
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
struct nf_conntrack_expect *exp,
u32 portid,
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index dce56f09ac9a..5ed33ea4718e 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -10,6 +10,7 @@
extern unsigned int nf_ct_expect_hsize;
extern unsigned int nf_ct_expect_max;
+extern struct hlist_head *nf_ct_expect_hash;
struct nf_conntrack_expect {
/* Conntrack expectation list member */
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 956d8a6ac069..1a5fb36f165f 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -23,6 +23,9 @@ struct nf_conntrack_l4proto {
/* L4 Protocol number. */
u_int8_t l4proto;
+ /* Resolve clashes on insertion races. */
+ bool allow_clash;
+
/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 7e2b1d025f50..c5f8fc736b3d 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -45,7 +45,6 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
#endif
}
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
int nf_connlabel_set(struct nf_conn *ct, u16 bit);
int nf_connlabels_replace(struct nf_conn *ct,
@@ -54,11 +53,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_conntrack_labels_init(void);
void nf_conntrack_labels_fini(void);
-int nf_connlabels_get(struct net *net, unsigned int n_bits);
+int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
static inline int nf_conntrack_labels_init(void) { return 0; }
static inline void nf_conntrack_labels_fini(void) {}
-static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f6b1daf2e698..092235458691 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -303,7 +303,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
struct nft_set {
struct list_head list;
struct list_head bindings;
- char name[IFNAMSIZ];
+ char name[NFT_SET_MAXNAMELEN];
u32 ktype;
u32 dtype;
u32 size;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0e3172751755..254a0fc01800 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,14 +98,17 @@
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
- * nla_put_u64(skb, type, value) add u64 attribute to skb
+ * nla_put_u64_64bits(skb, type,
+ * value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
- * nla_put_s64(skb, type, value) add s64 attribute to skb
+ * nla_put_s64(skb, type, value,
+ * padattr) add s64 attribute to skb
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
- * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ * nla_put_msecs(skb, type, jiffies,
+ * padattr) add msecs attribute to skb
* nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
* nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
@@ -244,13 +247,21 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data);
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_append(struct sk_buff *skb, int attrlen, const void *data);
@@ -837,47 +848,56 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
}
/**
- * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+ u64 value, int padattr)
{
- return nla_put(skb, attrtype, sizeof(u64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
}
/**
- * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(__be64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
}
/**
- * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+ padattr);
}
/**
- * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(__le64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
}
/**
@@ -914,14 +934,16 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
}
/**
- * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(s64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
}
/**
@@ -947,16 +969,18 @@ static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
}
/**
- * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @njiffies: number of jiffies to convert to msecs
+ * @padattr: attribute type for the padding
*/
static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
- unsigned long njiffies)
+ unsigned long njiffies, int padattr)
{
u64 tmp = jiffies_to_msecs(njiffies);
- return nla_put(skb, attrtype, sizeof(u64), &tmp);
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
@@ -1231,6 +1255,61 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
}
/**
+ * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
+ * @skb: socket buffer the message is stored in
+ *
+ * Return true if padding is needed to align the next attribute (nla_data()) to
+ * a 64-bit aligned area.
+ */
+static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* The nlattr header is 4 bytes in size, that's why we test
+	 * whether the tail pointer _is_ aligned. A NOP attribute, plus the
+	 * nlattr header for the next attribute, will make nla_data()
+	 * 8-byte aligned.
+ */
+ if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * nla_align_64bit - 64-bit align the nla_data() of next attribute
+ * @skb: socket buffer the message is stored in
+ * @padattr: attribute type for the padding
+ *
+ * Conditionally emit a padding netlink attribute in order to make
+ * the next attribute we emit have a 64-bit aligned nla_data() area.
+ * This will only be done on architectures that do not have
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ *
+ * Returns zero on success or a negative error code.
+ */
+static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
+{
+ if (nla_need_padding_for_64bit(skb) &&
+ !nla_reserve(skb, padattr, 0))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/**
+ * nla_total_size_64bit - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size_64bit(int payload)
+{
+ return NLA_ALIGN(nla_attr_size(payload))
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ + NLA_ALIGN(nla_attr_size(0))
+#endif
+ ;
+}
+
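Putting these pieces together, a dump callback that emits an aligned 64-bit attribute would reserve room with nla_total_size_64bit() and write it with nla_put_u64_64bit(). MYFAM_ATTR_STATS and MYFAM_ATTR_PAD below are hypothetical attribute types used only for illustration:

/* Sketch: emit one 64-bit attribute with an explicit padding attribute so
 * that nla_data() ends up 8-byte aligned on all architectures.
 */
static int myfam_put_stats(struct sk_buff *skb, u64 value)
{
	/* sizing side: account nla_total_size_64bit(sizeof(u64)) per attribute */
	if (nla_put_u64_64bit(skb, MYFAM_ATTR_STATS, value, MYFAM_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}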
+/**
* nla_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @head: head of attribute stream
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 723b61c82b3f..38b1a80517f0 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -84,7 +84,6 @@ struct netns_ct {
struct ctl_table_header *event_sysctl_header;
struct ctl_table_header *helper_sysctl_header;
#endif
- char *slabname;
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
@@ -93,11 +92,6 @@ struct netns_ct {
int sysctl_tstamp;
int sysctl_checksum;
- unsigned int htable_size;
- seqcount_t generation;
- struct kmem_cache *nf_conntrack_cachep;
- struct hlist_nulls_head *hash;
- struct hlist_head *expect_hash;
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
@@ -107,9 +101,5 @@ struct netns_ct {
unsigned int labels_used;
u8 label_words;
#endif
-#ifdef CONFIG_NF_NAT_NEEDED
- struct hlist_head *nat_bysource;
- unsigned int nat_htable_size;
-#endif
};
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index a69cde3ce460..d061ffeb1e71 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -133,6 +133,9 @@ struct netns_ipv4 {
struct fib_rules_ops *mr_rules_ops;
#endif
#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int sysctl_fib_multipath_use_neigh;
+#endif
atomic_t rt_genid;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 730d82ad6ee5..24cd3949a9a4 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -80,6 +80,7 @@ struct netns_xfrm {
struct flow_cache flow_cache_global;
atomic_t flow_cache_genid;
struct list_head flow_cache_gc_list;
+ atomic_t flow_cache_gc_count;
spinlock_t flow_cache_gc_lock;
struct work_struct flow_cache_gc_work;
struct work_struct flow_cache_flush_work;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 57ce24fb0047..87499b6b35d6 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,7 +109,13 @@ struct nci_ops {
struct nci_conn_info {
struct list_head list;
- __u8 id; /* can be an RF Discovery ID or an NFCEE ID */
+ /* NCI specification 4.4.2 Connection Creation
+ * The combination of destination type and destination specific
+ * parameters shall uniquely identify a single destination for the
+ * Logical Connection
+ */
+ struct dest_spec_params *dest_params;
+ __u8 dest_type;
__u8 conn_id;
__u8 max_pkt_payload_len;
@@ -260,7 +266,9 @@ struct nci_dev {
__u32 manufact_specific_info;
/* Save RF Discovery ID or NFCEE ID under conn_create */
- __u8 cur_id;
+ struct dest_spec_params cur_params;
+ /* Save destination type under conn_create */
+ __u8 cur_dest_type;
/* stored during nci_data_exchange */
struct sk_buff *rx_data_reassembly;
@@ -298,6 +306,8 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
size_t params_len,
struct core_conn_create_dest_spec_params *params);
int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+ struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
@@ -378,7 +388,8 @@ void nci_clear_target_list(struct nci_dev *ndev);
void nci_req_complete(struct nci_dev *ndev, int result);
struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
int conn_id);
-int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id);
+int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
+ struct dest_spec_params *params);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 32cb3e591e07..fcab4de49951 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -138,6 +138,8 @@ enum nl802154_attrs {
NL802154_ATTR_SEC_KEY,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+ NL802154_ATTR_PAD,
+
__NL802154_ATTR_AFTER_LAST,
NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
};
@@ -295,6 +297,7 @@ enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_MODE,
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
+ NL802154_DEV_ADDR_ATTR_PAD,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
@@ -320,6 +323,7 @@ enum nl802154_key_id_attrs {
NL802154_KEY_ID_ATTR_IMPLICIT,
NL802154_KEY_ID_ATTR_SOURCE_SHORT,
NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+ NL802154_KEY_ID_ATTR_PAD,
/* keep last */
__NL802154_KEY_ID_ATTR_AFTER_LAST,
@@ -402,6 +406,7 @@ enum nl802154_dev {
NL802154_DEV_ATTR_EXTENDED_ADDR,
NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
NL802154_DEV_ATTR_KEY_MODE,
+ NL802154_DEV_ATTR_PAD,
/* keep last */
__NL802154_DEV_ATTR_AFTER_LAST,
@@ -414,6 +419,7 @@ enum nl802154_devkey {
NL802154_DEVKEY_ATTR_FRAME_COUNTER,
NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
NL802154_DEVKEY_ATTR_ID,
+ NL802154_DEVKEY_ATTR_PAD,
/* keep last */
__NL802154_DEVKEY_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index caa5e18636df..0f7efa88f210 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,9 +392,6 @@ struct tc_cls_u32_offload {
};
};
-/* tca flags definitions */
-#define TCA_CLS_FLAGS_SKIP_HW 1
-
static inline bool tc_should_offload(struct net_device *dev, u32 flags)
{
if (!(dev->features & NETIF_F_HW_TC))
@@ -409,9 +406,27 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
return true;
}
+static inline bool tc_skip_sw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_flags_valid(u32 flags)
+{
+ if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
+ return false;
+
+ if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
+ return false;
+
+ return true;
+}
+
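A classifier's change() path would reject invalid flag combinations up front and then use the two skip helpers to decide where the filter is installed; a hedged sketch with error handling trimmed (the two installation helpers are hypothetical):

	/* Sketch: validate user-supplied flags, then split HW/SW installation. */
	if (!tc_flags_valid(flags))
		return -EINVAL;

	if (tc_should_offload(dev, flags))
		err = offload_filter_to_hw(dev, f);	/* hypothetical helper */

	if (tc_skip_sw(flags))
		return err;				/* hardware-only filter */

	install_sw_filter(tp, f);			/* hypothetical helper */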
enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
+ TC_CLSFLOWER_STATS,
};
struct tc_cls_flower_offload {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 401038d2f9b8..fea53f4d92ca 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
}
struct qdisc_watchdog {
+ u64 last_expires;
struct hrtimer timer;
struct Qdisc *qdisc;
};
diff --git a/include/net/protocol.h b/include/net/protocol.h
index da689f5432de..bf36ca34af7a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -107,9 +107,6 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
void inet_register_protosw(struct inet_protosw *p);
void inet_unregister_protosw(struct inet_protosw *p);
-int udp_add_offload(struct net *net, struct udp_offload *prot);
-void udp_del_offload(struct udp_offload *prot);
-
#if IS_ENABLED(CONFIG_IPV6)
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index f49759decb28..6ebe13eb1c4c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -85,24 +85,23 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
struct request_sock *req;
req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
-
- if (req) {
- req->rsk_ops = ops;
- if (attach_listener) {
- sock_hold(sk_listener);
- req->rsk_listener = sk_listener;
- } else {
- req->rsk_listener = NULL;
+ if (!req)
+ return NULL;
+ req->rsk_listener = NULL;
+ if (attach_listener) {
+ if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+ kmem_cache_free(ops->slab, req);
+ return NULL;
}
- req_to_sk(req)->sk_prot = sk_listener->sk_prot;
- sk_node_init(&req_to_sk(req)->sk_node);
- sk_tx_queue_clear(req_to_sk(req));
- req->saved_syn = NULL;
- /* Following is temporary. It is coupled with debugging
- * helpers in reqsk_put() & reqsk_free()
- */
- atomic_set(&req->rsk_refcnt, 0);
+ req->rsk_listener = sk_listener;
}
+ req->rsk_ops = ops;
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
+ atomic_set(&req->rsk_refcnt, 0);
+
return req;
}
diff --git a/include/net/route.h b/include/net/route.h
index 6de665bf1750..ad777d79af94 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -325,10 +325,11 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
static inline int inet_iif(const struct sk_buff *skb)
{
- int iif = skb_rtable(skb)->rt_iif;
+ struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_iif)
+ return rt->rt_iif;
- if (iif)
- return iif;
return skb->skb_iif;
}
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 2f87c1ba13de..006a7b81d758 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -47,6 +47,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* @get_num_rx_queues: Function to determine number of receive queues
* to create when creating a new device.
* @get_link_net: Function to get the i/o netns of the device
+ * @get_linkxstats_size: Function to calculate the required room for
+ * dumping device-specific extended link stats
+ * @fill_linkxstats: Function to dump device-specific extended link stats
*/
struct rtnl_link_ops {
struct list_head list;
@@ -95,6 +98,10 @@ struct rtnl_link_ops {
const struct net_device *dev,
const struct net_device *slave_dev);
struct net *(*get_link_net)(const struct net_device *dev);
+ size_t (*get_linkxstats_size)(const struct net_device *dev);
+ int (*fill_linkxstats)(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx);
};
int __rtnl_link_register(struct rtnl_link_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 46e55f0202a6..a1fd76c22a59 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -527,11 +527,27 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
return q->flags & TCQ_F_CPUSTATS;
}
+static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+ __u64 bytes, __u32 packets)
+{
+ bstats->bytes += bytes;
+ bstats->packets += packets;
+}
+
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb)
{
- bstats->bytes += qdisc_pkt_len(skb);
- bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ _bstats_update(bstats,
+ qdisc_pkt_len(skb),
+ skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+}
+
+static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+ __u64 bytes, __u32 packets)
+{
+ u64_stats_update_begin(&bstats->syncp);
+ _bstats_update(&bstats->bstats, bytes, packets);
+ u64_stats_update_end(&bstats->syncp);
}
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 03fb33efcae2..b392ac8382f2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -116,6 +116,22 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+ struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p);
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+ struct net *net, int pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info);
+
/*
* sctp/primitive.c
*/
@@ -189,10 +205,9 @@ extern int sysctl_sctp_wmem[3];
*/
/* SCTP SNMP MIB stats handlers */
-#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
-#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
/* sctp mib definitions */
enum {
@@ -359,21 +374,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
#define sctp_skb_for_each(pos, head, tmp) \
skb_queue_walk_safe(head, pos, tmp)
-/* A helper to append an entire skb list (list) to another (head). */
-static inline void sctp_skb_list_tail(struct sk_buff_head *list,
- struct sk_buff_head *head)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&head->lock, flags);
- spin_lock(&list->lock);
-
- skb_queue_splice_tail_init(list, head);
-
- spin_unlock(&list->lock);
- spin_unlock_irqrestore(&head->lock, flags);
-}
-
/**
* sctp_list_dequeue - remove from the head of the queue
* @list: list to dequeue from
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5a404c354f4c..16b013a6191c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -210,14 +210,15 @@ struct sctp_sock {
int user_frag;
__u32 autoclose;
- __u8 nodelay;
- __u8 disable_fragments;
- __u8 v4mapped;
- __u8 frag_interleave;
__u32 adaptation_ind;
__u32 pd_point;
- __u8 recvrcvinfo;
- __u8 recvnxtinfo;
+ __u16 nodelay:1,
+ disable_fragments:1,
+ v4mapped:1,
+ frag_interleave:1,
+ recvrcvinfo:1,
+ recvnxtinfo:1,
+ data_ready_signalled:1;
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 35512ac6dcfb..c9228ad7ee91 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -123,12 +123,9 @@ struct linux_xfrm_mib {
#define DECLARE_SNMP_STAT(type, name) \
extern __typeof__(type) __percpu *name
-#define SNMP_INC_STATS_BH(mib, field) \
+#define __SNMP_INC_STATS(mib, field) \
__this_cpu_inc(mib->mibs[field])
-#define SNMP_INC_STATS_USER(mib, field) \
- this_cpu_inc(mib->mibs[field])
-
#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
atomic_long_inc(&mib->mibs[field])
@@ -138,12 +135,9 @@ struct linux_xfrm_mib {
#define SNMP_DEC_STATS(mib, field) \
this_cpu_dec(mib->mibs[field])
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS(mib, field, addend) \
__this_cpu_add(mib->mibs[field], addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
- this_cpu_add(mib->mibs[field], addend)
-
#define SNMP_ADD_STATS(mib, field, addend) \
this_cpu_add(mib->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
@@ -152,7 +146,7 @@ struct linux_xfrm_mib {
this_cpu_inc(ptr[basefield##PKTS]); \
this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
__typeof__((mib->mibs) + 0) ptr = mib->mibs; \
__this_cpu_inc(ptr[basefield##PKTS]); \
@@ -162,7 +156,7 @@ struct linux_xfrm_mib {
#if BITS_PER_LONG==32
-#define SNMP_ADD_STATS64_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS64(mib, field, addend) \
do { \
__typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
@@ -170,20 +164,16 @@ struct linux_xfrm_mib {
u64_stats_update_end(&ptr->syncp); \
} while (0)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) \
+#define SNMP_ADD_STATS64(mib, field, addend) \
do { \
local_bh_disable(); \
- SNMP_ADD_STATS64_BH(mib, field, addend); \
- local_bh_enable(); \
+ __SNMP_ADD_STATS64(mib, field, addend); \
+ local_bh_enable(); \
} while (0)
-#define SNMP_ADD_STATS64(mib, field, addend) \
- SNMP_ADD_STATS64_USER(mib, field, addend)
-
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
__typeof__(*mib) *ptr; \
ptr = raw_cpu_ptr((mib)); \
@@ -195,19 +185,17 @@ struct linux_xfrm_mib {
#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
local_bh_disable(); \
- SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \
- local_bh_enable(); \
+ __SNMP_UPD_PO_STATS64(mib, basefield, addend); \
+ local_bh_enable(); \
} while (0)
#else
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field)
+#define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field)
#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
#endif
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 121ffc115c4f..649d2a8c17fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -178,7 +178,7 @@ struct sock_common {
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
- struct hlist_nulls_node skc_portaddr_node;
+ struct hlist_node skc_portaddr_node;
};
struct proto *skc_prot;
possible_net_t skc_net;
@@ -382,8 +382,13 @@ struct sock {
atomic_t sk_omem_alloc;
int sk_sndbuf;
struct sk_buff_head sk_write_queue;
+
+ /*
+	 * These bitfields cannot be changed atomically, so all
+	 * changes are protected by the socket lock.
+ */
kmemcheck_bitfield_begin(flags);
- unsigned int sk_shutdown : 2,
+ unsigned int sk_padding : 2,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4,
@@ -391,6 +396,7 @@ struct sock {
sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
kmemcheck_bitfield_end(flags);
+
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate; /* bytes per second */
@@ -418,6 +424,7 @@ struct sock {
struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
+ u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
@@ -438,6 +445,7 @@ struct sock {
struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
+ struct rcu_head sk_rcu;
};
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
@@ -456,28 +464,32 @@ struct sock {
#define SK_CAN_REUSE 1
#define SK_FORCE_REUSE 2
+int sk_set_peek_off(struct sock *sk, int val);
+
static inline int sk_peek_offset(struct sock *sk, int flags)
{
- if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
- return sk->sk_peek_off;
- else
- return 0;
+ if (unlikely(flags & MSG_PEEK)) {
+ s32 off = READ_ONCE(sk->sk_peek_off);
+ if (off >= 0)
+ return off;
+ }
+
+ return 0;
}
static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
- if (sk->sk_peek_off >= 0) {
- if (sk->sk_peek_off >= val)
- sk->sk_peek_off -= val;
- else
- sk->sk_peek_off = 0;
+ s32 off = READ_ONCE(sk->sk_peek_off);
+
+ if (unlikely(off >= 0)) {
+ off = max_t(s32, off - val, 0);
+ WRITE_ONCE(sk->sk_peek_off, off);
}
}
static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
- if (sk->sk_peek_off >= 0)
- sk->sk_peek_off += val;
+ sk_peek_offset_bwd(sk, -val);
}
/*
@@ -564,7 +576,7 @@ static inline bool __sk_del_node_init(struct sock *sk)
modifications.
*/
-static inline void sock_hold(struct sock *sk)
+static __always_inline void sock_hold(struct sock *sk)
{
atomic_inc(&sk->sk_refcnt);
}
@@ -572,7 +584,7 @@ static inline void sock_hold(struct sock *sk)
/* Ungrab socket in the context, which assumes that socket refcnt
cannot hit zero, f.e. it is true in context of any socketcall.
*/
-static inline void __sock_put(struct sock *sk)
+static __always_inline void __sock_put(struct sock *sk)
{
atomic_dec(&sk->sk_refcnt);
}
@@ -625,7 +637,11 @@ static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
- hlist_add_head_rcu(&sk->sk_node, list);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_add_tail_rcu(&sk->sk_node, list);
+ else
+ hlist_add_head_rcu(&sk->sk_node, list);
}
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -673,18 +689,18 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry(__sk, list, sk_bind_node)
/**
- * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @offset: offset of hlist_node within the struct.
*
*/
-#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset) \
- for (pos = (head)->first; \
- (!is_a_nulls(pos)) && \
+#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
+ for (pos = rcu_dereference((head)->first); \
+ pos != NULL && \
({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
- pos = pos->next)
+ pos = rcu_dereference(pos->next))
static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
@@ -724,6 +740,7 @@ enum sock_flags {
*/
SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+	SOCK_RCU_FREE, /* wait for an RCU grace period in sk_destruct() */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -916,6 +933,17 @@ void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);
+void __sk_flush_backlog(struct sock *sk);
+
+static inline bool sk_flush_backlog(struct sock *sk)
+{
+ if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
+ __sk_flush_backlog(sk);
+ return true;
+ }
+ return false;
+}
+
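A long-running sendmsg() loop can use the new helper to drain packets that BHs queued on the backlog while the socket was owned, instead of holding them all until release_sock(); a minimal sketch:

	/* Sketch: periodically process the socket backlog inside a copy loop. */
	while (msg_data_left(msg)) {
		/* ... copy one chunk into the write queue ... */

		if (sk_flush_backlog(sk))
			continue;	/* backlog was processed, re-check state */
	}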
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
struct request_sock_ops;
@@ -1314,24 +1342,14 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
}
-/* Used by processes to "lock" a socket state, so that
- * interrupts and bottom half handlers won't change it
- * from under us. It essentially blocks any incoming
- * packets, so that we won't get any new data or any
- * packets that change the state of the socket.
- *
- * While locked, BH processing will add new packets to
- * the backlog queue. This queue is processed by the
- * owner of the socket lock right before it is released.
- *
- * Since ~2.3.5 it is also exclusive sleep lock serializing
- * accesses from user process context.
- */
-#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
-
static inline void sock_release_ownership(struct sock *sk)
{
- sk->sk_lock.owned = 0;
+ if (sk->sk_lock.owned) {
+ sk->sk_lock.owned = 0;
+
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ }
}
/*
@@ -1353,6 +1371,16 @@ do { \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
+#ifdef CONFIG_LOCKDEP
+static inline bool lockdep_sock_is_held(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return lockdep_is_held(&sk->sk_lock) ||
+ lockdep_is_held(&sk->sk_lock.slock);
+}
+#endif
+
void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
@@ -1386,6 +1414,40 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
spin_unlock_bh(&sk->sk_lock.slock);
}
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue. This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+ sock_owned_by_me(sk);
+ return sk->sk_lock.owned;
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
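Code that must run with the socket lock held can now document that requirement and, under lockdep, enforce it with sock_owned_by_me(); a hedged sketch:

/* Sketch: assert lock ownership before touching lock-protected state. */
static void myproto_update_state(struct sock *sk)
{
	sock_owned_by_me(sk);	/* no-op unless CONFIG_LOCKDEP is enabled */

	/* ... modify fields protected by the socket lock ... */
}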
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
@@ -1395,6 +1457,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
@@ -1422,8 +1485,11 @@ void sk_send_sigurg(struct sock *sk);
struct sockcm_cookie {
u32 mark;
+ u16 tsflags;
};
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+ struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
struct sockcm_cookie *sockc);
@@ -1588,8 +1654,8 @@ static inline void sk_rethink_txhash(struct sock *sk)
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
- lockdep_is_held(&sk->sk_lock.slock));
+ return rcu_dereference_check(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
}
static inline struct dst_entry *
@@ -1861,6 +1927,7 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
@@ -1897,11 +1964,19 @@ static inline unsigned long sock_wspace(struct sock *sk)
*/
static inline void sk_set_bit(int nr, struct sock *sk)
{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
set_bit(nr, &sk->sk_wq_raw->flags);
}
static inline void sk_clear_bit(int nr, struct sock *sk)
{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
clear_bit(nr, &sk->sk_wq_raw->flags);
}
@@ -2011,6 +2086,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
}
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+ int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ atomic_add(segs, &sk->sk_drops);
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2058,19 +2140,21 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk->sk_stamp = skb->tstamp;
}
-void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
+ * @tsflags: timestamping flags to use
* @tx_flags: completed with instructions for time stamping
*
* Note : callers should take care of initial *tx_flags value (usually 0)
*/
-static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
{
- if (unlikely(sk->sk_tsflags))
- __sock_tx_timestamp(sk, tx_flags);
+ if (unlikely(tsflags))
+ __sock_tx_timestamp(tsflags, tx_flags);
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
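Callers now pass the timestamping flags explicitly, typically taken from the sockcm_cookie filled in by sock_cmsg_send() (or from sk->sk_tsflags as a fallback); a hedged sketch of a transmit path:

	/* Sketch: derive tx_flags for an outgoing skb from per-call tsflags. */
	__u8 tx_flags = 0;

	sock_tx_timestamp(sk, sockc.tsflags, &tx_flags);
	skb_shinfo(skb)->tx_flags |= tx_flags;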
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122e8404..985619a59323 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
struct net_device *orig_dev;
enum switchdev_attr_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
union {
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
};
/* SWITCHDEV_OBJ_ID_PORT_VLAN */
@@ -93,7 +97,7 @@ struct switchdev_obj_ipv4_fib {
struct switchdev_obj obj;
u32 dst;
int dst_len;
- struct fib_info fi;
+ struct fib_info *fi;
u8 tos;
u8 type;
u32 nlflags;
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index dae96bae1c19..e891835eb74e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -2,6 +2,7 @@
#define __NET_TC_MIR_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
struct tcf_mirred {
struct tcf_common common;
@@ -14,4 +15,18 @@ struct tcf_mirred {
#define to_mirred(a) \
container_of(a->priv, struct tcf_mirred, common)
+static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+ return false;
+}
+
+static inline int tcf_mirred_ifindex(const struct tc_action *a)
+{
+ return to_mirred(a)->tcfm_ifindex;
+}
+
#endif /* __NET_TC_MIR_H */
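
A minimal sketch of how a driver's tc offload path might use the new helpers to recognize an egress-redirect mirred action; example_parse_action() is hypothetical, the two inline helpers are the ones added above.

#include <net/act_api.h>
#include <net/tc_act/tc_mirred.h>

/* Returns the target ifindex for an egress redirect, -EOPNOTSUPP otherwise. */
static int example_parse_action(const struct tc_action *a)
{
	if (is_tcf_mirred_redirect(a))
		return tcf_mirred_ifindex(a);

	return -EOPNOTSUPP;
}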
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6db10228113f..0bcc70f4e1fb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -332,9 +332,8 @@ bool tcp_check_oom(struct sock *sk, int shift);
extern struct proto tcp_prot;
#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
void tcp_tasklet_init(void);
@@ -452,10 +451,15 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
+enum tcp_synack_type {
+ TCP_SYNACK_NORMAL,
+ TCP_SYNACK_FASTOPEN,
+ TCP_SYNACK_COOKIE,
+};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- bool attach_req);
+ enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -533,8 +537,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
-int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
@@ -756,14 +760,21 @@ struct tcp_skb_cb {
TCPCB_REPAIRED)
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
- /* 1 byte hole */
+ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
+ eor:1, /* Is skb MSG_EOR marked? */
+ unused:6;
__u32 ack_seq; /* Sequence number ACK'd */
union {
- struct inet_skb_parm h4;
+ struct {
+ /* There is space for up to 20 bytes */
+ } tx; /* only used for outgoing skbs */
+ union {
+ struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
+ struct inet6_skb_parm h6;
#endif
- } header; /* For incoming frames */
+ } header; /* For incoming skbs */
+ };
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
@@ -775,7 +786,9 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- return TCP_SKB_CB(skb)->header.h6.iif;
+ bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+
+ return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif
@@ -803,6 +816,11 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
return TCP_SKB_CB(skb)->tcp_gso_size;
}
+static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
+{
+ return likely(!TCP_SKB_CB(skb)->eor);
+}
+
/* Events passed to congestion control interface */
enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
@@ -838,6 +856,11 @@ enum tcp_ca_ack_event_flags {
union tcp_cc_info;
+struct ack_sample {
+ u32 pkts_acked;
+ s32 rtt_us;
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -861,7 +884,7 @@ struct tcp_congestion_ops {
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
- void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
+ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
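
A hedged sketch of a congestion control module adapting to the new pkts_acked() signature, which now receives the aggregated struct ack_sample; the "example_cc" module and its logging are hypothetical, and the reno helpers are assumed to be the usual exported ones.

#include <linux/module.h>
#include <net/tcp.h>

static void example_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
	/* rtt_us is negative when no valid RTT sample was taken. */
	if (sample->rtt_us > 0)
		pr_debug("acked %u packets, rtt %d us\n",
			 sample->pkts_acked, sample->rtt_us);
}

static struct tcp_congestion_ops example_cc __read_mostly = {
	.name		= "example_cc",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.pkts_acked	= example_pkts_acked,
};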
@@ -1041,17 +1064,6 @@ static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
return 3;
}
-/* Slow start with delack produces 3 packets of burst, so that
- * it is safe "de facto". This will be the default - same as
- * the default reordering threshold - but if reordering increases,
- * we must be able to allow cwnd to burst at least this much in order
- * to not pull it back when holes are filled.
- */
-static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
-{
- return tp->reordering;
-}
-
/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
@@ -1303,10 +1315,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+ TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}
/* from STCP */
@@ -1740,7 +1752,7 @@ struct tcp_request_sock_ops {
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- bool attach_req);
+ enum tcp_synack_type synack_type);
};
#ifdef CONFIG_SYN_COOKIES
@@ -1749,7 +1761,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
__u16 *mss)
{
tcp_synq_overflow(sk);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
return ops->cookie_init_seq(skb, mss);
}
#else
@@ -1848,4 +1860,17 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
tp->data_segs_in += segs_in;
}
+/*
+ * TCP listen path runs lockless.
+ * We forced "struct sock" to be const qualified to make sure
+ * we don't modify one of its fields by mistake.
+ * Here, we increment sk_drops which is an atomic_t, so we can safely
+ * make sock writable again.
+ */
+static inline void tcp_listendrop(const struct sock *sk)
+{
+ atomic_inc(&((struct sock *)sk)->sk_drops);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+}
+
#endif /* _TCP_H */
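
A minimal usage sketch for tcp_listendrop() in a lockless listener path; the surrounding example_drop_syn() helper is hypothetical.

#include <net/tcp.h>

/* Account a dropped SYN against the (const) listener without its lock. */
static void example_drop_syn(const struct sock *listener, struct sk_buff *skb)
{
	tcp_listendrop(listener);	/* sk_drops + LINUX_MIB_LISTENDROPS */
	kfree_skb(skb);
}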
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index b927413dde86..276f9760ab56 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -41,8 +41,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
- struct flowi6 *fl6, struct ipv6_txoptions *opt,
- int *hlimit, int *tclass, int *dontfrag);
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+ struct sockcm_cookie *sockc);
void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index 92927f729ac8..ae07f375370d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -59,7 +59,7 @@ struct udp_skb_cb {
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
- struct hlist_nulls_head head;
+ struct hlist_head head;
int count;
spinlock_t lock;
} __attribute__((aligned(2 * sizeof(long))));
@@ -158,9 +158,21 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len);
+static inline void udp_csum_pull_header(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
+ skb->csum);
+ skb_pull_rcsum(skb, sizeof(struct udphdr));
+ UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
+}
+
+typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+ __be16 dport);
+
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh);
-int udp_gro_complete(struct sk_buff *skb, int nhoff);
+ struct udphdr *uh, udp_lookup_t lookup);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
@@ -260,6 +272,8 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif,
struct udp_table *tbl, struct sk_buff *skb);
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
@@ -269,36 +283,38 @@ struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *tbl,
struct sk_buff *skb);
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport);
/*
* SNMP statistics for UDP and UDP-Lite
*/
-#define UDP_INC_STATS_USER(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \
- else SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
- else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0)
-
-#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
- else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \
+#define UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+#define __UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+
+#define __UDP6_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+ else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite) do { \
- if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \
- else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
+#define UDP6_INC_STATS(net, field, __lite) do { \
+ if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
+ else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field) \
+#define __UDPX_INC_STATS(sk, field) \
do { \
if ((sk)->sk_family == AF_INET) \
- UDP_INC_STATS_BH(sock_net(sk), field, 0); \
+ __UDP_INC_STATS(sock_net(sk), field, 0); \
else \
- UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
+ __UDP6_INC_STATS(sock_net(sk), field, 0); \
} while (0)
#else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
#endif
/* /proc */
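
A hedged sketch of how the IPv4 offload side might pass the new udp_lookup_t callback so the GRO layer can find the owning socket; the example_* wrappers only illustrate the call shape and are not taken from net/ipv4/udp_offload.c.

#include <net/udp.h>

static struct sk_buff **example_udp4_gro_receive(struct sk_buff **head,
						 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (!uh)
		return NULL;

	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
}

static int example_udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}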
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b83114077cee..9d14f707e534 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -64,6 +64,11 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb);
+typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
+ int nhoff);
struct udp_tunnel_sock_cfg {
void *sk_user_data; /* user data used by encap_rcv call back */
@@ -71,6 +76,8 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
+ udp_tunnel_gro_receive_t gro_receive;
+ udp_tunnel_gro_complete_t gro_complete;
};
/* Setup the given (UDP) sock to receive UDP encapsulated packets */
@@ -98,23 +105,13 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
int md_size);
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
- bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
return iptunnel_handle_offloads(skb, type);
}
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
- struct udphdr *uh;
-
- uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
- skb_shinfo(skb)->gso_type |= uh->check ?
- SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
#if IS_ENABLED(CONFIG_IPV6)
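
A hedged sketch of a tunnel driver attaching per-socket GRO handlers through the extended udp_tunnel_sock_cfg; the example callbacks are empty stubs and setup_udp_tunnel_sock() is assumed to be the existing helper declared elsewhere in this header.

#include <net/udp_tunnel.h>

static struct sk_buff **example_gro_receive(struct sock *sk,
					    struct sk_buff **head,
					    struct sk_buff *skb)
{
	return NULL;			/* stub: no aggregation */
}

static int example_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	return 0;			/* stub */
}

static void example_setup_encap(struct net *net, struct socket *sock, void *priv)
{
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data	= priv,
		.encap_type	= 1,
		.encap_rcv	= NULL,	/* driver receive handler goes here */
		.gro_receive	= example_gro_receive,
		.gro_complete	= example_gro_complete,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}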
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 73ed2e951c02..b8803165df91 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -119,6 +119,64 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+/*
+ * VXLAN Generic Protocol Extension (VXLAN_F_GPE):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|Ver|I|P|R|O| Reserved |Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Ver = Version. Indicates VXLAN GPE protocol version.
+ *
+ * P = Next Protocol Bit. The P bit is set to indicate that the
+ * Next Protocol field is present.
+ *
+ * O = OAM Flag Bit. The O bit is set to indicate that the packet
+ * is an OAM packet.
+ *
+ * Next Protocol = This 8 bit field indicates the protocol header
+ * immediately following the VXLAN GPE header.
+ *
+ * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01
+ */
+
+struct vxlanhdr_gpe {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 oam_flag:1,
+ reserved_flags1:1,
+ np_applied:1,
+ instance_applied:1,
+ version:2,
+ reserved_flags2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved_flags2:2,
+ version:2,
+ instance_applied:1,
+ np_applied:1,
+ reserved_flags1:1,
+ oam_flag:1;
+#endif
+ u8 reserved_flags3;
+ u8 reserved_flags4;
+ u8 next_protocol;
+ __be32 vx_vni;
+};
+
+/* VXLAN-GPE header flags. */
+#define VXLAN_HF_VER cpu_to_be32(BIT(29) | BIT(28))
+#define VXLAN_HF_NP cpu_to_be32(BIT(26))
+#define VXLAN_HF_OAM cpu_to_be32(BIT(24))
+
+#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \
+ cpu_to_be32(0xff))
+
+/* VXLAN-GPE header Next Protocol. */
+#define VXLAN_GPE_NP_IPV4 0x01
+#define VXLAN_GPE_NP_IPV6 0x02
+#define VXLAN_GPE_NP_ETHERNET 0x03
+#define VXLAN_GPE_NP_NSH 0x04
+
struct vxlan_metadata {
u32 gbp;
};
@@ -126,12 +184,9 @@ struct vxlan_metadata {
/* per UDP socket information */
struct vxlan_sock {
struct hlist_node hlist;
- struct work_struct del_work;
struct socket *sock;
- struct rcu_head rcu;
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
- struct udp_offload udp_offloads;
u32 flags;
};
@@ -206,16 +261,26 @@ struct vxlan_dev {
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
#define VXLAN_F_COLLECT_METADATA 0x2000
+#define VXLAN_F_GPE 0x4000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
*/
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
+ VXLAN_F_GPE | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
VXLAN_F_REMCSUM_NOPARTIAL | \
VXLAN_F_COLLECT_METADATA)
+/* Flags that can be set together with VXLAN_F_GPE. */
+#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
+ VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_COLLECT_METADATA)
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -252,7 +317,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
- sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+ (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
@@ -325,13 +392,11 @@ static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
return vni_field;
}
-#if IS_ENABLED(CONFIG_VXLAN)
-void vxlan_get_rx_port(struct net_device *netdev);
-#else
static inline void vxlan_get_rx_port(struct net_device *netdev)
{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev);
}
-#endif
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
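
With VXLAN_F_GPE the inner payload is no longer necessarily Ethernet, so a receive path has to translate the GPE Next Protocol field. A hedged sketch of that translation; the helper name is hypothetical.

#include <linux/if_ether.h>
#include <net/vxlan.h>

static __be16 example_gpe_next_proto(const struct vxlanhdr_gpe *gpe)
{
	switch (gpe->next_protocol) {
	case VXLAN_GPE_NP_IPV4:
		return htons(ETH_P_IP);
	case VXLAN_GPE_NP_IPV6:
		return htons(ETH_P_IPV6);
	case VXLAN_GPE_NP_ETHERNET:
		return htons(ETH_P_TEB);
	default:
		return 0;	/* e.g. NSH: unsupported, let the caller drop */
	}
}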
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d6f6e5006ee9..adfebd6f243c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -45,12 +45,8 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
-#define XFRM_INC_STATS_BH(net, field) ((void)(net))
-#define XFRM_INC_STATS_USER(net, field) ((void)(net))
#endif
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
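
A minimal sketch of the guard ib_safe_file_access() is intended for, placed at the top of a legacy write()-as-ioctl handler; example_write() itself is hypothetical.

#include <linux/fs.h>
#include <rdma/ib.h>

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *pos)
{
	if (!ib_safe_file_access(filp)) {
		pr_err_once("%s: unsafe use of the legacy write() ABI by %s\n",
			    __func__, current->comm);
		return -EACCES;
	}

	/* ... parse and handle the command from buf as before ... */
	return len;
}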
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 37dd534cbeab..c8a773ffe23b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -239,12 +239,15 @@ struct ib_vendor_mad {
#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
+#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
+#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
+
struct ib_class_port_info {
u8 base_version;
u8 class_version;
__be16 capability_mask;
- u8 reserved[3];
- u8 resp_time_value;
+ /* 27 bits for cap_mask2, 5 bits for resp_time */
+ __be32 cap_mask2_resp_time;
u8 redirect_gid[16];
__be32 redirect_tcslfl;
__be16 redirect_lid;
@@ -259,6 +262,59 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+/**
+ * ib_get_cpi_resp_time - Returns the resp_time value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
+{
+ return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
+ IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_set_cpi_resp_time - Sets the response time in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
+ u8 rtime)
+{
+ cpi->cap_mask2_resp_time =
+ (cpi->cap_mask2_resp_time &
+ cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+ cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_get_cpi_capmask2 - Returns the capmask2 value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
+{
+ return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
+ IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
+/**
+ * ib_set_cpi_capmask2 - Sets the capmask2 in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @capmask2: The capmask2 to set.
+ */
+static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
+ u32 capmask2)
+{
+ cpi->cap_mask2_resp_time =
+ (cpi->cap_mask2_resp_time &
+ cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+ cpu_to_be32(capmask2 <<
+ IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
struct ib_mad_notice_attr {
u8 generic_type;
u8 prod_type_msb;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 0f3daae44bf9..b13419ce99ff 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -103,6 +103,9 @@ enum {
IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
IB_OPCODE_COMPARE_SWAP = 0x13,
IB_OPCODE_FETCH_ADD = 0x14,
+ /* opcode 0x15 is reserved */
+ IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16,
+ IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17,
/* real constants follow -- see comment about above IB_OPCODE()
macro for more details */
@@ -129,6 +132,8 @@ enum {
IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
IB_OPCODE(RC, COMPARE_SWAP),
IB_OPCODE(RC, FETCH_ADD),
+ IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
+ IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
/* UC */
IB_OPCODE(UC, SEND_FIRST),
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index cdc1c81aa275..384041669489 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,8 @@ enum ib_sa_selector {
IB_SA_BEST = 3
};
+#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT BIT(12)
+
/*
* Structures for SA records are named "struct ib_sa_xxx_rec." No
* attempt is made to pack structures to match the physical layout of
@@ -439,4 +441,14 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
void *context,
struct ib_sa_query **sa_query);
+/* Support get SA ClassPortInfo */
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_class_port_info *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query);
+
#endif /* IB_SA_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fb2cef4e9747..432bed510369 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -220,6 +220,7 @@ enum ib_device_cap_flags {
IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
+ IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
};
enum ib_signature_prot_cap {
@@ -402,56 +403,55 @@ enum ib_port_speed {
IB_SPEED_EDR = 32
};
-struct ib_protocol_stats {
- /* TBD... */
-};
-
-struct iw_protocol_stats {
- u64 ipInReceives;
- u64 ipInHdrErrors;
- u64 ipInTooBigErrors;
- u64 ipInNoRoutes;
- u64 ipInAddrErrors;
- u64 ipInUnknownProtos;
- u64 ipInTruncatedPkts;
- u64 ipInDiscards;
- u64 ipInDelivers;
- u64 ipOutForwDatagrams;
- u64 ipOutRequests;
- u64 ipOutDiscards;
- u64 ipOutNoRoutes;
- u64 ipReasmTimeout;
- u64 ipReasmReqds;
- u64 ipReasmOKs;
- u64 ipReasmFails;
- u64 ipFragOKs;
- u64 ipFragFails;
- u64 ipFragCreates;
- u64 ipInMcastPkts;
- u64 ipOutMcastPkts;
- u64 ipInBcastPkts;
- u64 ipOutBcastPkts;
-
- u64 tcpRtoAlgorithm;
- u64 tcpRtoMin;
- u64 tcpRtoMax;
- u64 tcpMaxConn;
- u64 tcpActiveOpens;
- u64 tcpPassiveOpens;
- u64 tcpAttemptFails;
- u64 tcpEstabResets;
- u64 tcpCurrEstab;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
- u64 tcpInErrs;
- u64 tcpOutRsts;
-};
-
-union rdma_protocol_stats {
- struct ib_protocol_stats ib;
- struct iw_protocol_stats iw;
-};
+/**
+ * struct rdma_hw_stats
+ * @timestamp - Used by the core code to track when the last update was made
+ * @lifespan - Used by the core code to determine how old the counters
+ * should be before being updated again. Stored in jiffies, defaults
+ * to 10 milliseconds, drivers can override the default by specifying
+ * their own value during their allocation routine.
+ * @names - Array of pointers to static names used for the counters in
+ * the sysfs directory.
+ * @num_counters - How many hardware counters there are. If names is
+ * shorter than this number, a kernel oops will result. Driver authors
+ * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(names) < num_counters)
+ * in their code to prevent this.
+ * @value - Array of u64 counters that are accessed by the sysfs code and
+ * filled in by the driver's get_hw_stats routine
+ */
+struct rdma_hw_stats {
+ unsigned long timestamp;
+ unsigned long lifespan;
+ const char * const *names;
+ int num_counters;
+ u64 value[];
+};
+
+#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
+ * for drivers.
+ * @names - Array of static const char *
+ * @num_counters - How many elements in array
+ * @lifespan - How many milliseconds between updates
+ */
+static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const char * const *names, int num_counters,
+ unsigned long lifespan)
+{
+ struct rdma_hw_stats *stats;
+
+ stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
+ GFP_KERNEL);
+ if (!stats)
+ return NULL;
+ stats->names = names;
+ stats->num_counters = num_counters;
+ stats->lifespan = msecs_to_jiffies(lifespan);
+
+ return stats;
+}
+
/* Define bits for the various functionality this port needs to be supported by
* the core.
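
A hedged sketch of the driver side of the new stats interface, built only from the helpers and callback signatures shown above; the counter names and the constant values filled in by example_get_hw_stats() are placeholders for real hardware reads.

#include <rdma/ib_verbs.h>

static const char * const example_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *device,
						    u8 port_num)
{
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int example_get_hw_stats(struct ib_device *device,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	stats->value[0] = 0;	/* read the rx counter from hardware here */
	stats->value[1] = 0;	/* read the tx counter from hardware here */

	/* Returning num_counters resets the timestamp for a full update. */
	return stats->num_counters;
}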
@@ -931,6 +931,13 @@ struct ib_qp_cap {
u32 max_send_sge;
u32 max_recv_sge;
u32 max_inline_data;
+
+ /*
+ * Maximum number of rdma_rw_ctx structures in flight at a time.
+ * ib_create_qp() will calculate the right number of needed WRs
+ * and MRs based on this.
+ */
+ u32 max_rdma_ctxs;
};
enum ib_sig_type {
@@ -981,6 +988,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+ IB_QP_CREATE_SCATTER_FCS = 1 << 8,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1002,7 +1010,11 @@ struct ib_qp_init_attr {
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
enum ib_qp_create_flags create_flags;
- u8 port_num; /* special QP types only */
+
+ /*
+ * Only needed for special QP types, or when using the RW API.
+ */
+ u8 port_num;
};
struct ib_qp_open_attr {
@@ -1421,9 +1433,14 @@ struct ib_qp {
struct ib_pd *pd;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
+ spinlock_t mr_lock;
+ int mrs_used;
+ struct list_head rdma_mrs;
+ struct list_head sig_mrs;
struct ib_srq *srq;
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
struct list_head xrcd_list;
+
/* count times opened, mcast attaches, flow attaches */
atomic_t usecnt;
struct list_head open_list;
@@ -1438,12 +1455,16 @@ struct ib_qp {
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
u32 lkey;
u32 rkey;
u64 iova;
u32 length;
unsigned int page_size;
+ bool need_inval;
+ union {
+ struct ib_uobject *uobject; /* user */
+ struct list_head qp_entry; /* FR */
+ };
};
struct ib_mw {
@@ -1685,8 +1706,29 @@ struct ib_device {
struct iw_cm_verbs *iwcm;
- int (*get_protocol_stats)(struct ib_device *device,
- union rdma_protocol_stats *stats);
+ /**
+ * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
+ * driver initialized data. The struct is kfree()'ed by the sysfs
+ * core when the device is removed. A lifespan of -1 in the return
+ * struct tells the core to set a default lifespan.
+ */
+ struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
+ u8 port_num);
+ /**
+ * get_hw_stats - Fill in the counter value(s) in the stats struct.
+ * @index - The index in the value array we wish to have updated, or
+ * num_counters if we want all stats updated
+ * Return codes -
+ * < 0 - Error, no counters updated
+ * index - Updated the single counter pointed to by index
+ * num_counters - Updated all counters (will reset the timestamp
+ * and prevent further calls for lifespan milliseconds)
+ * Drivers are allowed to update all counters in lieu of just the
+ * one given in index at their option
+ */
+ int (*get_hw_stats)(struct ib_device *device,
+ struct rdma_hw_stats *stats,
+ u8 port, int index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
@@ -1827,7 +1869,8 @@ struct ib_device {
u32 max_num_sg);
int (*map_mr_sg)(struct ib_mr *mr,
struct scatterlist *sg,
- int sg_nents);
+ int sg_nents,
+ unsigned int *sg_offset);
struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
enum ib_mw_type type,
struct ib_udata *udata);
@@ -1903,6 +1946,8 @@ struct ib_device {
u8 node_type;
u8 phys_port_cnt;
struct ib_device_attr attrs;
+ struct attribute_group *hw_stats_ag;
+ struct rdma_hw_stats *hw_stats;
/**
* The following mandatory functions are used only at device
@@ -2317,6 +2362,18 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
device->add_gid && device->del_gid;
}
+/*
+ * Check if the device supports READ W/ INVALIDATE.
+ */
+static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
+{
+ /*
+ * iWarp drivers must support READ W/ INVALIDATE. No other protocol
+ * has support for it yet.
+ */
+ return rdma_protocol_iwarp(dev, port_num);
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid,
struct ib_gid_attr *attr);
@@ -3111,29 +3168,23 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
-int ib_map_mr_sg(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int page_size);
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size);
static inline int
-ib_map_mr_sg_zbva(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int page_size)
+ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size)
{
int n;
- n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
+ n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
mr->iova = 0;
return n;
}
-int ib_sg_to_pages(struct ib_mr *mr,
- struct scatterlist *sgl,
- int sg_nents,
- int (*set_page)(struct ib_mr *, u64));
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+ unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
diff --git a/include/rdma/mr_pool.h b/include/rdma/mr_pool.h
new file mode 100644
index 000000000000..986010b812eb
--- /dev/null
+++ b/include/rdma/mr_pool.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _RDMA_MR_POOL_H
+#define _RDMA_MR_POOL_H 1
+
+#include <rdma/ib_verbs.h>
+
+struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list);
+void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr);
+
+int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
+ enum ib_mr_type type, u32 max_num_sg);
+void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list);
+
+#endif /* _RDMA_MR_POOL_H */
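
A hedged sketch of the expected MR pool lifecycle against the per-QP rdma_mrs list added to struct ib_qp; the pool size and SGE limit are illustrative only.

#include <rdma/mr_pool.h>

static int example_setup_mrs(struct ib_qp *qp)
{
	/* Pre-allocate 16 fast-registration MRs of up to 32 SGEs each. */
	return ib_mr_pool_init(qp, &qp->rdma_mrs, 16, IB_MR_TYPE_MEM_REG, 32);
}

static void example_use_one_mr(struct ib_qp *qp)
{
	struct ib_mr *mr = ib_mr_pool_get(qp, &qp->rdma_mrs);

	if (!mr)
		return;		/* pool exhausted */

	/* ... post a REG_MR work request using mr, then on completion ... */

	ib_mr_pool_put(qp, &qp->rdma_mrs, mr);
}

static void example_teardown_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}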
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index a8696551abb1..16274e2133cd 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,15 +149,15 @@ struct rvt_driver_params {
int qpn_res_end;
int nports;
int npkeys;
- u8 qos_shift;
char cq_name[RVT_CQN_MAX];
int node;
- int max_rdma_atomic;
int psn_mask;
int psn_shift;
int psn_modify_mask;
u32 core_cap_flags;
u32 max_mad_size;
+ u8 qos_shift;
+ u8 max_rdma_atomic;
};
/* Protection domain */
@@ -426,6 +426,15 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
}
/*
+ * Return the max atomic suitable for determining
+ * the size of the ack ring buffer in a QP.
+ */
+static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
+{
+ return rdi->dparms.max_rdma_atomic + 1;
+}
+
+/*
* Return the indexed PKEY from the port PKEY table.
*/
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
@@ -467,6 +476,7 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
+void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 497e59065c2c..6d23b879416a 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -117,8 +117,9 @@
/*
* Wait flags that would prevent any packet type from being sent.
*/
-#define RVT_S_ANY_WAIT_IO (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
- RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
+#define RVT_S_ANY_WAIT_IO \
+ (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
+ RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
/*
* Wait flags that would prevent send work requests from making progress.
@@ -210,8 +211,6 @@ struct rvt_mmap_info {
unsigned size;
};
-#define RVT_MAX_RDMA_ATOMIC 16
-
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
@@ -281,8 +280,7 @@ struct rvt_qp {
atomic_t refcount ____cacheline_aligned_in_smp;
wait_queue_head_t wait;
- struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
+ struct rvt_ack_entry *s_ack_queue;
struct rvt_sge_state s_rdma_read_sge;
spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
new file mode 100644
index 000000000000..377d865e506d
--- /dev/null
+++ b/include/rdma/rw.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _RDMA_RW_H
+#define _RDMA_RW_H
+
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/mr_pool.h>
+
+struct rdma_rw_ctx {
+ /* number of RDMA READ/WRITE WRs (not counting MR WRs) */
+ u32 nr_ops;
+
+ /* tag for the union below: */
+ u8 type;
+
+ union {
+ /* for mapping a single SGE: */
+ struct {
+ struct ib_sge sge;
+ struct ib_rdma_wr wr;
+ } single;
+
+ /* for mapping of multiple SGEs: */
+ struct {
+ struct ib_sge *sges;
+ struct ib_rdma_wr *wrs;
+ } map;
+
+ /* for registering multiple WRs: */
+ struct rdma_rw_reg_ctx {
+ struct ib_sge sge;
+ struct ib_rdma_wr wr;
+ struct ib_reg_wr reg_wr;
+ struct ib_send_wr inv_wr;
+ struct ib_mr *mr;
+ } *reg;
+
+ struct {
+ struct rdma_rw_reg_ctx data;
+ struct rdma_rw_reg_ctx prot;
+ struct ib_send_wr sig_inv_wr;
+ struct ib_mr *sig_mr;
+ struct ib_sge sig_sge;
+ struct ib_sig_handover_wr sig_wr;
+ } *sig;
+ };
+};
+
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir);
+
+int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
+ enum dma_data_direction dir);
+void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ enum dma_data_direction dir);
+
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
+void rdma_rw_cleanup_mrs(struct ib_qp *qp);
+
+#endif /* _RDMA_RW_H */
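
A hedged sketch of issuing an RDMA READ through the new R/W context API; the example_rdma_read() wrapper and its completion handling are hypothetical, the rdma_rw_ctx_* calls follow the prototypes above.

#include <rdma/rw.h>

/* The context must stay allocated until the completion handler runs. */
static int example_rdma_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			     u8 port_num, struct scatterlist *sg, u32 sg_cnt,
			     u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
	int ret;

	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
	if (ret)
		rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}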
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 9ebab3a8cf0a..b2017440b765 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -68,8 +68,6 @@ struct rxrpc_wire_header {
} __packed;
-extern const char *rxrpc_pkts[];
-
#define RXRPC_SUPPORTED_PACKET_TYPES ( \
(1 << RXRPC_PACKET_TYPE_DATA) | \
(1 << RXRPC_PACKET_TYPE_ACK) | \
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index e0a3398b1547..8ec7c30e35af 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -18,25 +18,6 @@ enum scsi_timeouts {
};
/*
- * The maximum number of SG segments that we will put inside a
- * scatterlist (unless chaining is used). Should ideally fit inside a
- * single page, to avoid a higher order allocation. We could define this
- * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
- * minimum value is 32
- */
-#define SCSI_MAX_SG_SEGMENTS 128
-
-/*
- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
-#else
-#define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS
-#endif
-
-/*
* DIX-capable adapters effectively support infinite chaining for the
* protection information scatterlist
*/
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 11571b2a831e..20bf7eaef05a 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -63,6 +63,7 @@ extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info);
+int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd);
extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
int desc_type);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 74d79bde7075..a6c346df290d 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -50,6 +50,12 @@ enum scsi_device_state {
SDEV_CREATED_BLOCK, /* same as above but for created devices */
};
+enum scsi_scan_mode {
+ SCSI_SCAN_INITIAL = 0,
+ SCSI_SCAN_RESCAN,
+ SCSI_SCAN_MANUAL,
+};
+
enum scsi_device_event {
SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */
SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */
@@ -242,6 +248,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
enum scsi_target_state {
STARGET_CREATED = 1,
STARGET_RUNNING,
+ STARGET_REMOVE,
STARGET_DEL,
};
@@ -391,7 +398,8 @@ extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, u64 lun, int rescan);
+ unsigned int id, u64 lun,
+ enum scsi_scan_mode rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
@@ -534,9 +542,9 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
/*
* Although VPD inquiries can go to SCSI-2 type devices,
* some USB ones crash on receiving them, and the pages
- * we currently ask for are for SPC-3 and beyond
+ * we currently ask for are mandatory for SPC-2 and beyond
*/
- if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
+ if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages)
return 1;
return 0;
}
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index dbb8c640e26f..98d366b55770 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -16,6 +16,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int);
extern int scsi_block_when_processing_errors(struct scsi_device *);
extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
struct scsi_sense_hdr *sshdr);
+extern int scsi_check_sense(struct scsi_cmnd *);
static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
{
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index fcfa3d7f5e7e..76e9d278c334 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -37,7 +37,7 @@ struct blk_queue_tags;
* used in one scatter-gather request.
*/
#define SG_NONE 0
-#define SG_ALL SCSI_MAX_SG_SEGMENTS
+#define SG_ALL SG_CHUNK_SIZE
#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c2ae21cbaa2c..d1defd1ebd95 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -115,6 +115,8 @@
#define VERIFY_16 0x8f
#define SYNCHRONIZE_CACHE_16 0x91
#define WRITE_SAME_16 0x93
+#define ZBC_OUT 0x94
+#define ZBC_IN 0x95
#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f
@@ -143,6 +145,13 @@
#define MO_SET_PRIORITY 0x0e
#define MO_SET_TIMESTAMP 0x0f
#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for ZBC_IN */
+#define ZI_REPORT_ZONES 0x00
+/* values for ZBC_OUT */
+#define ZO_CLOSE_ZONE 0x01
+#define ZO_FINISH_ZONE 0x02
+#define ZO_OPEN_ZONE 0x03
+#define ZO_RESET_WRITE_POINTER 0x04
/* values for variable length command */
#define XDREAD_32 0x03
#define XDWRITE_32 0x04
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
new file mode 100644
index 000000000000..2f9bb984a4df
--- /dev/null
+++ b/include/soc/at91/atmel-sfr.h
@@ -0,0 +1,18 @@
+/*
+ * Atmel SFR (Special Function Registers) register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_SFR_H
+#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
+
+#define AT91_SFR_I2SCLKSEL 0x90 /* I2SC Register */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
new file mode 100644
index 000000000000..9b1d43d671a3
--- /dev/null
+++ b/include/soc/nps/common.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SOC_NPS_COMMON_H
+#define SOC_NPS_COMMON_H
+
+#ifdef CONFIG_SMP
+#define NPS_IPI_IRQ 5
+#endif
+
+#define NPS_HOST_REG_BASE 0xF6000000
+
+#define NPS_MSU_BLKID 0x018
+
+#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+
+#ifndef __ASSEMBLY__
+
+/* In order to increase compilation test coverage */
+#ifdef CONFIG_ARC
+static inline void nps_ack_gic(void)
+{
+ __asm__ __volatile__ (
+ " .word %0\n"
+ :
+ : "i"(CTOP_INST_RSPI_GIC_0_R12)
+ : "memory");
+}
+#else
+static inline void nps_ack_gic(void) { }
+#define write_aux_reg(r, v)
+#define read_aux_reg(r) 0
+#endif
+
+/* CPU global ID */
+struct global_id {
+ union {
+ struct {
+#ifdef CONFIG_EZNPS_MTM_EXT
+ u32 __reserved:20, cluster:4, core:4, thread:4;
+#else
+ u32 __reserved:24, cluster:4, core:4;
+#endif
+ };
+ u32 value;
+ };
+};
+
+/*
+ * Convert logical to physical CPU IDs
+ *
+ * The conversion swaps bits 1 and 2 of the cluster id (out of 4 bits).
+ * As a result, each quad of logical cluster ids is physically adjacent,
+ * rather than following the ids as they physically come with each cluster.
+ * The table below is the 4x4 mesh of core clusters as laid out on chip.
+ * Cluster ids are in format: logical (physical)
+ *
+ * ----------------- ------------------
+ * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)|
+ *
+ * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)|
+ * ----------------- ------------------
+ * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)|
+ *
+ * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)|
+ * ----------------- ------------------
+ * 0 1 2 3
+ */
+static inline int nps_cluster_logic_to_phys(int cluster)
+{
+#ifdef __arc__
+ __asm__ __volatile__(
+ " mov r3,%0\n"
+ " .short %1\n"
+ " .word %2\n"
+ " mov %0,r3\n"
+ : "+r"(cluster)
+ : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
+ "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
+ : "r3");
+#endif
+
+ return cluster;
+}
+
+#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
+ ({ struct global_id gid; gid.value = cpu; \
+ nps_cluster_logic_to_phys(gid.cluster); })
+
+struct nps_host_reg_address {
+ union {
+ struct {
+ u32 base:8, cl_x:4, cl_y:4,
+ blkid:6, reg:8, __reserved:2;
+ };
+ u32 value;
+ };
+};
+
+struct nps_host_reg_address_non_cl {
+ union {
+ struct {
+ u32 base:7, blkid:11, reg:12, __reserved:2;
+ };
+ u32 value;
+ };
+};
+
+static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
+{
+ struct nps_host_reg_address_non_cl reg_address;
+
+ reg_address.value = NPS_HOST_REG_BASE;
+ reg_address.blkid = blkid;
+ reg_address.reg = reg;
+
+ return (void *)reg_address.value;
+}
+
+static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
+{
+ struct nps_host_reg_address reg_address;
+ u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
+
+ reg_address.value = NPS_HOST_REG_BASE;
+ reg_address.cl_x = (cl >> 2) & 0x3;
+ reg_address.cl_y = cl & 0x3;
+ reg_address.blkid = blkid;
+ reg_address.reg = reg;
+
+ return (void *)reg_address.value;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 961b821b6a46..b4c9219e7f95 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -26,6 +26,7 @@
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
+#define TEGRA_FUSE_USB_CALIB_EXT_0 0x250
#ifndef __ASSEMBLY__
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index d18efe402ff1..e9e53473a63e 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -33,9 +33,9 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SMP
-bool tegra_pmc_cpu_is_powered(int cpuid);
-int tegra_pmc_cpu_power_on(int cpuid);
-int tegra_pmc_cpu_remove_clamping(int cpuid);
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
+int tegra_pmc_cpu_power_on(unsigned int cpuid);
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
#endif /* CONFIG_SMP */
/*
@@ -72,6 +72,7 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
#define TEGRA_POWERGATE_AUD 27
#define TEGRA_POWERGATE_DFD 28
#define TEGRA_POWERGATE_VE2 29
+#define TEGRA_POWERGATE_MAX TEGRA_POWERGATE_VE2
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
@@ -108,50 +109,51 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
#define TEGRA_IO_RAIL_SYS_DDC 58
#ifdef CONFIG_ARCH_TEGRA
-int tegra_powergate_is_powered(int id);
-int tegra_powergate_power_on(int id);
-int tegra_powergate_power_off(int id);
-int tegra_powergate_remove_clamping(int id);
+int tegra_powergate_is_powered(unsigned int id);
+int tegra_powergate_power_on(unsigned int id);
+int tegra_powergate_power_off(unsigned int id);
+int tegra_powergate_remove_clamping(unsigned int id);
/* Must be called with clk disabled, and returns with clk enabled */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst);
-int tegra_io_rail_power_on(int id);
-int tegra_io_rail_power_off(int id);
+int tegra_io_rail_power_on(unsigned int id);
+int tegra_io_rail_power_off(unsigned int id);
#else
-static inline int tegra_powergate_is_powered(int id)
+static inline int tegra_powergate_is_powered(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_power_on(int id)
+static inline int tegra_powergate_power_on(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_power_off(int id)
+static inline int tegra_powergate_power_off(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_remove_clamping(int id)
+static inline int tegra_powergate_remove_clamping(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+static inline int tegra_powergate_sequence_power_up(unsigned int id,
+ struct clk *clk,
struct reset_control *rst)
{
return -ENOSYS;
}
-static inline int tegra_io_rail_power_on(int id)
+static inline int tegra_io_rail_power_on(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_io_rail_power_off(int id)
+static inline int tegra_io_rail_power_off(unsigned int id)
{
return -ENOSYS;
}
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index f86ef5ea9b01..67be2445941a 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -51,6 +51,16 @@ struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
void *filter_data);
struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set, the dmaengine driver won't put any restriction on
+ * the supported sample formats and will leave the DMA transfer size undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and for catching corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
/**
* struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
* @addr: Address of the DAI data source or destination register.
@@ -63,6 +73,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* requesting the DMA channel.
* @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
@@ -72,6 +83,7 @@ struct snd_dmaengine_dai_dma_data {
void *filter_data;
const char *chan_name;
unsigned int fifo_size;
+ unsigned int flags;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h
index e20d219a0304..babd445c7505 100644
--- a/include/sound/hda_chmap.h
+++ b/include/sound/hda_chmap.h
@@ -36,6 +36,8 @@ struct hdac_chmap_ops {
int (*chmap_validate)(struct hdac_chmap *hchmap, int ca,
int channels, unsigned char *chmap);
+ int (*get_spk_alloc)(struct hdac_device *hdac, int pcm_idx);
+
void (*get_chmap)(struct hdac_device *hdac, int pcm_idx,
unsigned char *chmap);
void (*set_chmap)(struct hdac_device *hdac, int pcm_idx,
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..796cabf6be5e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,9 +9,9 @@
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -25,16 +25,15 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- return 0;
}
-static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
- int rate)
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+ hda_nid_t nid, int rate)
{
return 0;
}
-static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
bool *audio_enabled, char *buffer,
int max_bytes)
{
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 07fa59237feb..b9593b201599 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -14,6 +14,8 @@
* @gtscap: gts capabilities pointer
* @drsmcap: dma resume capabilities pointer
* @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_ext_bus {
struct hdac_bus bus;
@@ -27,6 +29,9 @@ struct hdac_ext_bus {
void __iomem *drsmcap;
struct list_head hlink_list;
+
+ struct mutex lock;
+ bool cmd_dma_state;
};
int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
@@ -142,6 +147,9 @@ struct hdac_ext_link {
void __iomem *ml_addr; /* link output stream reg pointer */
u32 lcaps; /* link capablities */
u16 lsdiid; /* link sdi identifier */
+
+ int ref_count;
+
struct list_head list;
};
@@ -154,6 +162,11 @@ void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream);
+int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
+ struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
+ struct hdac_ext_link *link);
+
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
writel(((readl(addr + reg) & ~(mask)) | (val)), \
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
new file mode 100644
index 000000000000..fc3a481ad91e
--- /dev/null
+++ b/include/sound/hdmi-codec.h
@@ -0,0 +1,100 @@
+/*
+ * hdmi-codec.h - HDMI Codec driver API
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDMI_CODEC_H__
+#define __HDMI_CODEC_H__
+
+#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
+#include <sound/asoundef.h>
+#include <uapi/sound/asound.h>
+
+/*
+ * Protocol between ASoC cpu-dai and HDMI-encoder
+ */
+struct hdmi_codec_daifmt {
+ enum {
+ HDMI_I2S,
+ HDMI_RIGHT_J,
+ HDMI_LEFT_J,
+ HDMI_DSP_A,
+ HDMI_DSP_B,
+ HDMI_AC97,
+ HDMI_SPDIF,
+ } fmt;
+ int bit_clk_inv:1;
+ int frame_clk_inv:1;
+ int bit_clk_master:1;
+ int frame_clk_master:1;
+};
+
+/*
+ * HDMI audio parameters
+ */
+struct hdmi_codec_params {
+ struct hdmi_audio_infoframe cea;
+ struct snd_aes_iec958 iec;
+ int sample_rate;
+ int sample_width;
+ int channels;
+};
+
+struct hdmi_codec_ops {
+ /*
+ * Called when ASoC starts an audio stream setup.
+ * Optional
+ */
+ int (*audio_startup)(struct device *dev);
+
+ /*
+ * Configures HDMI-encoder for audio stream.
+ * Mandatory
+ */
+ int (*hw_params)(struct device *dev,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
+ * Shuts down the audio stream.
+ * Mandatory
+ */
+ void (*audio_shutdown)(struct device *dev);
+
+ /*
+ * Mute/unmute HDMI audio stream.
+ * Optional
+ */
+ int (*digital_mute)(struct device *dev, bool enable);
+
+ /*
+ * Provides EDID-Like-Data from connected HDMI device.
+ * Optional
+ */
+ int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+};
+
+/* HDMI codec initialization data */
+struct hdmi_codec_pdata {
+ const struct hdmi_codec_ops *ops;
+ uint i2s:1;
+ uint spdif:1;
+ int max_i2s_channels;
+};
+
+#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
+
+#endif /* __HDMI_CODEC_H__ */
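A minimal sketch of how an HDMI encoder driver might hook into this API by instantiating the ASoC codec with platform data; struct hdmi_codec_ops, struct hdmi_codec_pdata and HDMI_CODEC_DRV_NAME come from the new header, while the foo_* callbacks and the parent-device handling are illustrative.

#include <linux/platform_device.h>
#include <sound/hdmi-codec.h>

static int foo_audio_hw_params(struct device *dev,
			       struct hdmi_codec_daifmt *fmt,
			       struct hdmi_codec_params *hparms)
{
	/* program the encoder's audio path from fmt/hparms (mandatory op) */
	return 0;
}

static void foo_audio_shutdown(struct device *dev)
{
	/* stop the encoder's audio stream (mandatory op) */
}

static const struct hdmi_codec_ops foo_codec_ops = {
	.hw_params	= foo_audio_hw_params,
	.audio_shutdown	= foo_audio_shutdown,
};

static struct platform_device *foo_register_audio_codec(struct device *parent)
{
	struct hdmi_codec_pdata pdata = {
		.ops		  = &foo_codec_ops,
		.i2s		  = 1,
		.max_i2s_channels = 8,
	};

	/* the generic hdmi-audio-codec driver binds against this name */
	return platform_device_register_data(parent, HDMI_CODEC_DRV_NAME,
					     PLATFORM_DEVID_AUTO,
					     &pdata, sizeof(pdata));
}
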
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0eed397aca8e..36f023acb201 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -6,4 +6,6 @@
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
#endif
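A minimal sketch of the new hw_params-based variant, for drivers that need the IEC958 channel-status bytes before the runtime is fully set up; the prototype comes from the hunk above, the foo_* wrapper is illustrative.

#include <sound/pcm.h>
#include <sound/pcm_iec958.h>

static int foo_spdif_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params)
{
	u8 cs[4];
	int ret;

	/* build consumer-mode channel status from the requested rate/width */
	ret = snd_pcm_create_iec958_consumer_hw_params(params, cs, sizeof(cs));
	if (ret < 0)
		return ret;

	/* ... write cs[] into the controller's channel-status registers ... */
	return 0;
}
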
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 97069466c38d..3101d53468aa 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -100,6 +100,7 @@ struct device;
{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_micbias, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
@@ -473,7 +474,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_out_drv, /* output driver */
snd_soc_dapm_adc, /* analog to digital converter */
snd_soc_dapm_dac, /* digital to analog converter */
- snd_soc_dapm_micbias, /* microphone bias (power) */
+ snd_soc_dapm_micbias, /* microphone bias (power) - DEPRECATED: use snd_soc_dapm_supply */
snd_soc_dapm_mic, /* microphone */
snd_soc_dapm_hp, /* headphones */
snd_soc_dapm_spk, /* speaker */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 02b4a215fd75..fd7b58a58d6f 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1002,7 +1002,7 @@ struct snd_soc_dai_link {
*/
const char *platform_name;
struct device_node *platform_of_node;
- int be_id; /* optional ID for machine driver BE identification */
+ int id; /* optional ID for machine driver link identification */
const struct snd_soc_pcm_stream *params;
unsigned int num_params;
@@ -1683,6 +1683,9 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card,
int snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv);
+struct snd_soc_dai *snd_soc_find_dai(
+ const struct snd_soc_dai_link_component *dlc);
+
#include <sound/soc-dai.h>
#ifdef CONFIG_DEBUG_FS
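A minimal sketch of the newly exported DAI lookup; snd_soc_find_dai and struct snd_soc_dai_link_component are taken from the hunk above, the wrapper is illustrative.

#include <sound/soc.h>

/* Illustrative: resolve a registered DAI by its dai_name only. */
static struct snd_soc_dai *foo_lookup_dai(const char *dai_name)
{
	struct snd_soc_dai_link_component dlc = {
		.dai_name = dai_name,
	};

	return snd_soc_find_dai(&dlc);
}
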
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index c3371fa548cb..4ac24f5a3308 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -74,6 +74,7 @@ enum iscsit_transport_type {
ISCSI_IWARP_TCP = 3,
ISCSI_IWARP_SCTP = 4,
ISCSI_INFINIBAND = 5,
+ ISCSI_CXGBIT = 6,
};
/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
@@ -890,4 +891,30 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
}
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+
+static inline void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ conn->conn_tx_reset_cpumask = 0;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ conn->conn_rx_reset_cpumask = 0;
+ }
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
#endif /* ISCSI_TARGET_CORE_H */
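For context, the helper above is meant to be called from the per-connection RX/TX kthreads. A rough sketch of the TX side follows; the loop body and the foo_ name are hypothetical, and mode 1 selects the TX path as documented in the helper itself.

#include <linux/kthread.h>
#include <target/iscsi/iscsi_target_core.h>

static int foo_target_tx_thread(void *arg)
{
	struct iscsi_conn *conn = arg;

	while (!kthread_should_stop()) {
		/* re-pin this kthread when the connection's cpumask changed */
		iscsit_thread_check_cpumask(conn, current, 1);
		/* ... build and transmit the next response PDU ... */
	}
	return 0;
}
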
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 90e37faa2ede..40ac7cd80150 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,6 +6,7 @@ struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
char name[ISCSIT_TRANSPORT_NAME];
int transport_type;
+ bool rdma_shutdown;
int priv_size;
struct module *owner;
struct list_head t_node;
@@ -22,6 +23,13 @@ struct iscsit_transport {
int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+ int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_datain_req *, const void *, u32);
+ void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
+ void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
+ int (*iscsit_validate_params)(struct iscsi_conn *);
+ void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_r2t *);
enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
};
@@ -77,6 +85,18 @@ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_logout_rsp *);
extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
+extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
+extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+ struct iscsi_datain *,
+ struct iscsi_data_rsp *, bool);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+ bool);
+extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
/*
* From iscsi_target_device.c
*/
@@ -102,3 +122,24 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *, __be32);
extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
+ struct iscsi_conn *, u8);
+
+/*
+ * From iscsi_target_nego.c
+ */
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+ struct iscsi_login *);
+
+/*
+ * From iscsi_target_login.c
+ */
+extern __printf(2, 3) int iscsi_change_param_sprintf(
+ struct iscsi_conn *, const char *, ...);
+
+/*
+ * From iscsi_target_parameters.c
+ */
+extern struct iscsi_param *iscsi_find_param_from_key(
+ char *, struct iscsi_param_list *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 28ee5c2e6bcd..d8ab5101fad5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -85,7 +85,6 @@ extern struct configfs_attribute *passthrough_attrib_attrs[];
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
/* core helpers also used by xcopy during internal command setup */
-int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
struct scatterlist *, u32, struct scatterlist *, u32);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 3e0dd86360a2..b316b44d03f3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -536,7 +536,6 @@ struct se_node_acl {
char initiatorname[TRANSPORT_IQN_LEN];
/* Used to signal demo mode created ACL, disabled by default */
bool dynamic_node_acl;
- bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
enum target_prot_type saved_prot_type;
@@ -603,7 +602,6 @@ struct se_session {
struct list_head sess_cmd_list;
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
- struct kref sess_kref;
void *sess_cmd_map;
struct percpu_ida sess_tag_pool;
};
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 8ff6d40a294f..de44462a7680 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -50,10 +50,6 @@ struct target_core_fabric_ops {
*/
int (*check_stop_free)(struct se_cmd *);
void (*release_cmd)(struct se_cmd *);
- /*
- * Called with spin_lock_bh(struct se_portal_group->session_lock held.
- */
- int (*shutdown_session)(struct se_session *);
void (*close_session)(struct se_session *);
u32 (*sess_get_index)(struct se_session *);
/*
@@ -123,8 +119,6 @@ void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
-int target_get_session(struct se_session *);
-void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
@@ -185,6 +179,10 @@ int core_tpg_set_initiator_node_tag(struct se_portal_group *,
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
+int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+ u32 length, bool zero_page, bool chainable);
+void target_free_sgl(struct scatterlist *sgl, int nents);
+
/*
* The LIO target core uses DMA_TO_DEVICE to mean that data is going
* to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
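Appended after the target_core_fabric.h hunk: a minimal sketch of the now-exported SGL helpers with the added chainable argument; the prototypes come from the hunk above, the wrapper and its use are illustrative.

#include <linux/scatterlist.h>
#include <target/target_core_fabric.h>

static int foo_alloc_data_buf(u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;
	int ret;

	/* zeroed pages, no chaining into another SGL */
	ret = target_alloc_sgl(&sgl, &nents, length, true, false);
	if (ret < 0)
		return ret;

	/* ... hand sgl/nents to the backend ... */

	target_free_sgl(sgl, nents);
	return 0;
}
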
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index e215bf68f521..36e2d6fb1360 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -10,10 +10,11 @@
#include <trace/events/mmflags.h>
#define COMPACTION_STATUS \
- EM( COMPACT_DEFERRED, "deferred") \
EM( COMPACT_SKIPPED, "skipped") \
+ EM( COMPACT_DEFERRED, "deferred") \
EM( COMPACT_CONTINUE, "continue") \
EM( COMPACT_PARTIAL, "partial") \
+ EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
EM( COMPACT_COMPLETE, "complete") \
EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 4e4b2fa78609..09c71e9aaebf 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -872,7 +872,7 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
__entry->parent = d_inode(dentry->d_parent)->i_ino;
@@ -1451,7 +1451,7 @@ TRACE_EVENT(ext4_unlink_enter,
),
TP_fast_assign(
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->parent = parent->i_ino;
__entry->size = d_inode(dentry)->i_size;
@@ -1475,7 +1475,7 @@ TRACE_EVENT(ext4_unlink_exit,
),
TP_fast_assign(
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->ret = ret;
),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 0f565845707b..3a09bb4dc3b2 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
__entry->ret)
);
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
- TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+ blkcnt_t count),
- TP_ARGS(inode, nid, ofs_in_node),
+ TP_ARGS(inode, nid, ofs_in_node, count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(nid_t, nid)
__field(unsigned int, ofs_in_node)
+ __field(blkcnt_t, count)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = nid;
__entry->ofs_in_node = ofs_in_node;
+ __entry->count = count;
),
- TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+ TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
show_dev(__entry),
(unsigned int)__entry->nid,
- __entry->ofs_in_node)
+ __entry->ofs_in_node,
+ (unsigned long long)__entry->count)
);
DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
@@ -1271,14 +1275,14 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
- TP_PROTO(struct super_block *sb, int type, int count),
+ TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, type)
- __field(int, count)
+ __field(s64, count)
),
TP_fast_assign(
@@ -1287,7 +1291,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
__entry->count = count;
),
- TP_printk("dev = (%d,%d), %s, dirty count = %d",
+ TP_printk("dev = (%d,%d), %s, dirty count = %lld",
show_dev(__entry),
show_file_type(__entry->type),
__entry->count)
@@ -1295,14 +1299,14 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
- TP_PROTO(struct super_block *sb, int type, int count),
+ TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count)
);
DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
- TP_PROTO(struct super_block *sb, int type, int count),
+ TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count)
);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index aa69253ecc7d..f28292d73ddb 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -38,22 +38,25 @@ TRACE_EVENT(kvm_userspace_exit,
);
TRACE_EVENT(kvm_vcpu_wakeup,
- TP_PROTO(__u64 ns, bool waited),
- TP_ARGS(ns, waited),
+ TP_PROTO(__u64 ns, bool waited, bool valid),
+ TP_ARGS(ns, waited, valid),
TP_STRUCT__entry(
__field( __u64, ns )
__field( bool, waited )
+ __field( bool, valid )
),
TP_fast_assign(
__entry->ns = ns;
__entry->waited = waited;
+ __entry->valid = valid;
),
- TP_printk("%s time %lld ns",
+ TP_printk("%s time %lld ns, polling %s",
__entry->waited ? "wait" : "poll",
- __entry->ns)
+ __entry->ns,
+ __entry->valid ? "valid" : "invalid")
);
#if defined(CONFIG_HAVE_KVM_IRQFD)
@@ -105,7 +108,7 @@ TRACE_EVENT(kvm_ioapic_set_irq,
__entry->coalesced = coalesced;
),
- TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
+ TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
@@ -126,7 +129,7 @@ TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
__entry->e = e;
),
- TP_printk("dst %x vec=%u (%s|%s|%s%s)",
+ TP_printk("dst %x vec %u (%s|%s|%s%s)",
(u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
@@ -148,7 +151,7 @@ TRACE_EVENT(kvm_msi_set_irq,
__entry->data = data;
),
- TP_printk("dst %u vec %x (%s|%s|%s%s)",
+ TP_printk("dst %u vec %u (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
index 8b0fbd93082c..75fff8696bae 100644
--- a/include/trace/events/libata.h
+++ b/include/trace/events/libata.h
@@ -39,6 +39,7 @@
ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \
ata_opcode_name(ATA_CMD_FPDMA_READ), \
ata_opcode_name(ATA_CMD_FPDMA_WRITE), \
+ ata_opcode_name(ATA_CMD_NCQ_NON_DATA), \
ata_opcode_name(ATA_CMD_FPDMA_SEND), \
ata_opcode_name(ATA_CMD_FPDMA_RECV), \
ata_opcode_name(ATA_CMD_PIO_READ), \
@@ -97,6 +98,8 @@
ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \
ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \
ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \
+ ata_opcode_name(ATA_CMD_ZAC_MGMT_IN), \
+ ata_opcode_name(ATA_CMD_ZAC_MGMT_OUT), \
ata_opcode_name(ATA_CMD_RESTORE), \
ata_opcode_name(ATA_CMD_READ_LONG), \
ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \
@@ -139,6 +142,10 @@ const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
+const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char,
+ unsigned char, unsigned char);
+#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
+
TRACE_EVENT(ata_qc_issue,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -185,11 +192,12 @@ TRACE_EVENT(ata_qc_issue,
__entry->hob_nsect = qc->tf.hob_nsect;
),
- TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \
+ TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s%s " \
" tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
__entry->ata_port, __entry->ata_dev, __entry->tag,
show_protocol_name(__entry->proto),
show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect),
__entry->cmd, __entry->feature, __entry->nsect,
__entry->lbal, __entry->lbam, __entry->lbah,
__entry->hob_feature, __entry->hob_nsect,
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 000000000000..a72f9b94c80b
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,182 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/blkdev.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mmc_request_start,
+
+ TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+ TP_ARGS(host, mrq),
+
+ TP_STRUCT__entry(
+ __field(u32, cmd_opcode)
+ __field(u32, cmd_arg)
+ __field(unsigned int, cmd_flags)
+ __field(unsigned int, cmd_retries)
+ __field(u32, stop_opcode)
+ __field(u32, stop_arg)
+ __field(unsigned int, stop_flags)
+ __field(unsigned int, stop_retries)
+ __field(u32, sbc_opcode)
+ __field(u32, sbc_arg)
+ __field(unsigned int, sbc_flags)
+ __field(unsigned int, sbc_retries)
+ __field(unsigned int, blocks)
+ __field(unsigned int, blksz)
+ __field(unsigned int, data_flags)
+ __field(unsigned int, can_retune)
+ __field(unsigned int, doing_retune)
+ __field(unsigned int, retune_now)
+ __field(int, need_retune)
+ __field(int, hold_retune)
+ __field(unsigned int, retune_period)
+ __field(struct mmc_request *, mrq)
+ __string(name, mmc_hostname(host))
+ ),
+
+ TP_fast_assign(
+ __entry->cmd_opcode = mrq->cmd->opcode;
+ __entry->cmd_arg = mrq->cmd->arg;
+ __entry->cmd_flags = mrq->cmd->flags;
+ __entry->cmd_retries = mrq->cmd->retries;
+ __entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+ __entry->stop_arg = mrq->stop ? mrq->stop->arg : 0;
+ __entry->stop_flags = mrq->stop ? mrq->stop->flags : 0;
+ __entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+ __entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+ __entry->sbc_arg = mrq->sbc ? mrq->sbc->arg : 0;
+ __entry->sbc_flags = mrq->sbc ? mrq->sbc->flags : 0;
+ __entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+ __entry->blksz = mrq->data ? mrq->data->blksz : 0;
+ __entry->blocks = mrq->data ? mrq->data->blocks : 0;
+ __entry->data_flags = mrq->data ? mrq->data->flags : 0;
+ __entry->can_retune = host->can_retune;
+ __entry->doing_retune = host->doing_retune;
+ __entry->retune_now = host->retune_now;
+ __entry->need_retune = host->need_retune;
+ __entry->hold_retune = host->hold_retune;
+ __entry->retune_period = host->retune_period;
+ __assign_str(name, mmc_hostname(host));
+ __entry->mrq = mrq;
+ ),
+
+ TP_printk("%s: start struct mmc_request[%p]: "
+ "cmd_opcode=%u cmd_arg=0x%x cmd_flags=0x%x cmd_retries=%u "
+ "stop_opcode=%u stop_arg=0x%x stop_flags=0x%x stop_retries=%u "
+ "sbc_opcode=%u sbc_arg=0x%x sbc_flags=0x%x sbc_retires=%u "
+ "blocks=%u block_size=%u data_flags=0x%x "
+ "can_retune=%u doing_retune=%u retune_now=%u "
+ "need_retune=%d hold_retune=%d retune_period=%u",
+ __get_str(name), __entry->mrq,
+ __entry->cmd_opcode, __entry->cmd_arg,
+ __entry->cmd_flags, __entry->cmd_retries,
+ __entry->stop_opcode, __entry->stop_arg,
+ __entry->stop_flags, __entry->stop_retries,
+ __entry->sbc_opcode, __entry->sbc_arg,
+ __entry->sbc_flags, __entry->sbc_retries,
+ __entry->blocks, __entry->blksz, __entry->data_flags,
+ __entry->can_retune, __entry->doing_retune,
+ __entry->retune_now, __entry->need_retune,
+ __entry->hold_retune, __entry->retune_period)
+);
+
+TRACE_EVENT(mmc_request_done,
+
+ TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+ TP_ARGS(host, mrq),
+
+ TP_STRUCT__entry(
+ __field(u32, cmd_opcode)
+ __field(int, cmd_err)
+ __array(u32, cmd_resp, 4)
+ __field(unsigned int, cmd_retries)
+ __field(u32, stop_opcode)
+ __field(int, stop_err)
+ __array(u32, stop_resp, 4)
+ __field(unsigned int, stop_retries)
+ __field(u32, sbc_opcode)
+ __field(int, sbc_err)
+ __array(u32, sbc_resp, 4)
+ __field(unsigned int, sbc_retries)
+ __field(unsigned int, bytes_xfered)
+ __field(int, data_err)
+ __field(unsigned int, can_retune)
+ __field(unsigned int, doing_retune)
+ __field(unsigned int, retune_now)
+ __field(int, need_retune)
+ __field(int, hold_retune)
+ __field(unsigned int, retune_period)
+ __field(struct mmc_request *, mrq)
+ __string(name, mmc_hostname(host))
+ ),
+
+ TP_fast_assign(
+ __entry->cmd_opcode = mrq->cmd->opcode;
+ __entry->cmd_err = mrq->cmd->error;
+ memcpy(__entry->cmd_resp, mrq->cmd->resp, 4);
+ __entry->cmd_retries = mrq->cmd->retries;
+ __entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+ __entry->stop_err = mrq->stop ? mrq->stop->error : 0;
+ __entry->stop_resp[0] = mrq->stop ? mrq->stop->resp[0] : 0;
+ __entry->stop_resp[1] = mrq->stop ? mrq->stop->resp[1] : 0;
+ __entry->stop_resp[2] = mrq->stop ? mrq->stop->resp[2] : 0;
+ __entry->stop_resp[3] = mrq->stop ? mrq->stop->resp[3] : 0;
+ __entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+ __entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+ __entry->sbc_err = mrq->sbc ? mrq->sbc->error : 0;
+ __entry->sbc_resp[0] = mrq->sbc ? mrq->sbc->resp[0] : 0;
+ __entry->sbc_resp[1] = mrq->sbc ? mrq->sbc->resp[1] : 0;
+ __entry->sbc_resp[2] = mrq->sbc ? mrq->sbc->resp[2] : 0;
+ __entry->sbc_resp[3] = mrq->sbc ? mrq->sbc->resp[3] : 0;
+ __entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+ __entry->bytes_xfered = mrq->data ? mrq->data->bytes_xfered : 0;
+ __entry->data_err = mrq->data ? mrq->data->error : 0;
+ __entry->can_retune = host->can_retune;
+ __entry->doing_retune = host->doing_retune;
+ __entry->retune_now = host->retune_now;
+ __entry->need_retune = host->need_retune;
+ __entry->hold_retune = host->hold_retune;
+ __entry->retune_period = host->retune_period;
+ __assign_str(name, mmc_hostname(host));
+ __entry->mrq = mrq;
+ ),
+
+ TP_printk("%s: end struct mmc_request[%p]: "
+ "cmd_opcode=%u cmd_err=%d cmd_resp=0x%x 0x%x 0x%x 0x%x "
+ "cmd_retries=%u stop_opcode=%u stop_err=%d "
+ "stop_resp=0x%x 0x%x 0x%x 0x%x stop_retries=%u "
+ "sbc_opcode=%u sbc_err=%d sbc_resp=0x%x 0x%x 0x%x 0x%x "
+ "sbc_retries=%u bytes_xfered=%u data_err=%d "
+ "can_retune=%u doing_retune=%u retune_now=%u need_retune=%d "
+ "hold_retune=%d retune_period=%u",
+ __get_str(name), __entry->mrq,
+ __entry->cmd_opcode, __entry->cmd_err,
+ __entry->cmd_resp[0], __entry->cmd_resp[1],
+ __entry->cmd_resp[2], __entry->cmd_resp[3],
+ __entry->cmd_retries,
+ __entry->stop_opcode, __entry->stop_err,
+ __entry->stop_resp[0], __entry->stop_resp[1],
+ __entry->stop_resp[2], __entry->stop_resp[3],
+ __entry->stop_retries,
+ __entry->sbc_opcode, __entry->sbc_err,
+ __entry->sbc_resp[0], __entry->sbc_resp[1],
+ __entry->sbc_resp[2], __entry->sbc_resp[3],
+ __entry->sbc_retries,
+ __entry->bytes_xfered, __entry->data_err,
+ __entry->can_retune, __entry->doing_retune,
+ __entry->retune_now, __entry->need_retune,
+ __entry->hold_retune, __entry->retune_period)
+);
+
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
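A rough sketch of where the two tracepoints are meant to fire in the request path; the foo_ wrappers are hypothetical, and one compilation unit must also define CREATE_TRACE_POINTS before including the trace header.

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <trace/events/mmc.h>

static void foo_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	trace_mmc_request_start(host, mrq);
	host->ops->request(host, mrq);
}

static void foo_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	trace_mmc_request_done(host, mrq);
	/* ... complete mrq towards the caller ... */
}
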
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ef72c4aada56..d3e756539d44 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -172,6 +172,77 @@ TRACE_EVENT(rcu_grace_period_init,
);
/*
+ * Tracepoint for expedited grace-period events. Takes a string identifying
+ * the RCU flavor, the expedited grace-period sequence number, and a string
+ * identifying the grace-period-related event as follows:
+ *
+ * "snap": Captured snapshot of expedited grace period sequence number.
+ * "start": Started a real expedited grace period.
+ * "end": Ended a real expedited grace period.
+ * "endwake": Woke piggybackers up.
+ * "done": Someone else did the expedited grace period for us.
+ */
+TRACE_EVENT(rcu_exp_grace_period,
+
+ TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
+
+ TP_ARGS(rcuname, gpseq, gpevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(unsigned long, gpseq)
+ __field(const char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpseq = gpseq;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %lu %s",
+ __entry->rcuname, __entry->gpseq, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for expedited grace-period funnel-locking events. Takes a
+ * string identifying the RCU flavor, an integer identifying the rcu_node
+ * combining-tree level, another pair of integers identifying the lowest-
+ * and highest-numbered CPU associated with the current rcu_node structure,
+ * and a string identifying the grace-period-related event as follows:
+ *
+ * "nxtlvl": Advance to next level of rcu_node funnel
+ * "wait": Wait for someone else to do expedited GP
+ */
+TRACE_EVENT(rcu_exp_funnel_lock,
+
+ TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
+ const char *gpevent),
+
+ TP_ARGS(rcuname, level, grplo, grphi, gpevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(const char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %d %d %d %s",
+ __entry->rcuname, __entry->level, __entry->grplo,
+ __entry->grphi, __entry->gpevent)
+);
+
+/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
*
@@ -704,11 +775,15 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
- qsmask) do { } while (0)
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
level, grplo, grphi, event) \
do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+ qsmask) do { } while (0)
+#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
+ do { } while (0)
+#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
+ do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 079bd10a01b4..9a9b3e2550af 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -94,11 +94,9 @@
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
+ scsi_opcode_name(ZBC_OUT), \
+ scsi_opcode_name(ZBC_IN), \
scsi_opcode_name(SERVICE_ACTION_IN_16), \
- scsi_opcode_name(SAI_READ_CAPACITY_16), \
- scsi_opcode_name(SAI_GET_LBA_STATUS), \
- scsi_opcode_name(MI_REPORT_TARGET_PGS), \
- scsi_opcode_name(MO_SET_TARGET_PGS), \
scsi_opcode_name(READ_32), \
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 26486fcd74ce..88de5c205e86 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -20,9 +20,6 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
#undef __perf_count
#define __perf_count(c) (__count = (c))
@@ -37,8 +34,9 @@ perf_trace_##call(void *__data, proto) \
struct trace_event_call *event_call = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
struct trace_event_raw_##call *entry; \
+ struct bpf_prog *prog = event_call->prog; \
struct pt_regs *__regs; \
- u64 __addr = 0, __count = 1; \
+ u64 __count = 1; \
struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
@@ -48,7 +46,7 @@ perf_trace_##call(void *__data, proto) \
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
head = this_cpu_ptr(event_call->perf_events); \
- if (__builtin_constant_p(!__task) && !__task && \
+ if (!prog && __builtin_constant_p(!__task) && !__task && \
hlist_empty(head)) \
return; \
\
@@ -56,8 +54,7 @@ perf_trace_##call(void *__data, proto) \
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- entry = perf_trace_buf_prepare(__entry_size, \
- event_call->event.type, &__regs, &rctx); \
+ entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
if (!entry) \
return; \
\
@@ -67,8 +64,9 @@ perf_trace_##call(void *__data, proto) \
\
{ assign; } \
\
- perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, __regs, head, __task); \
+ perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
+ event_call, __count, __regs, \
+ head, __task); \
}
/*
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 170c93bbdbb7..80679a9fae65 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -652,9 +652,6 @@ static inline notrace int trace_event_get_offsets_##call( \
#undef TP_fast_assign
#define TP_fast_assign(args...) args
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
#undef __perf_count
#define __perf_count(c) (c)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6e0f5f01734c..a26415b5151c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -127,8 +127,11 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+#ifdef __ARCH_WANT_RENAMEAT
+/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
+#endif /* __ARCH_WANT_RENAMEAT */
/* fs/namespace.c */
#define __NR_umount2 39
@@ -718,9 +721,9 @@ __SYSCALL(__NR_mlock2, sys_mlock2)
#define __NR_copy_file_range 285
__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
#define __NR_preadv2 286
-__SYSCALL(__NR_preadv2, sys_preadv2)
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 287
-__SYSCALL(__NR_pwritev2, sys_pwritev2)
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
#undef __NR_syscalls
#define __NR_syscalls 288
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 453a76af123c..cdecf87576e8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -34,6 +34,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_AMDGPU_GEM_CREATE 0x00
#define DRM_AMDGPU_GEM_MMAP 0x01
#define DRM_AMDGPU_CTX 0x02
@@ -642,4 +646,8 @@ struct drm_amdgpu_info_hw_ip {
#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
index 6de7f0196ca0..72e326f9c7de 100644
--- a/include/uapi/drm/armada_drm.h
+++ b/include/uapi/drm/armada_drm.h
@@ -11,6 +11,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_ARMADA_GEM_CREATE 0x00
#define DRM_ARMADA_GEM_MMAP 0x02
#define DRM_ARMADA_GEM_PWRITE 0x03
@@ -44,4 +48,8 @@ struct drm_armada_gem_pwrite {
#define DRM_IOCTL_ARMADA_GEM_PWRITE \
ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a0ebfe7c9a28..452675fb55d9 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -36,7 +36,13 @@
#ifndef _DRM_H_
#define _DRM_H_
-#if defined(__KERNEL__) || defined(__linux__)
+#if defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#elif defined(__linux__)
#include <linux/types.h>
#include <asm/ioctl.h>
@@ -59,6 +65,10 @@ typedef unsigned long drm_handle_t;
#endif
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
@@ -181,7 +191,7 @@ enum drm_map_type {
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
/**
@@ -373,7 +383,11 @@ struct drm_buf_pub {
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void __user *virt;
+#else
void __user *virtual; /**< Mmap'd area in user-virtual */
+#endif
struct drm_buf_pub __user *list; /**< Buffer information */
};
@@ -431,7 +445,7 @@ struct drm_draw {
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
- DRM_DRAWABLE_CLIPRECTS,
+ DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;
struct drm_update_draw {
@@ -681,7 +695,15 @@ struct drm_prime_handle {
__s32 fd;
};
-#include <drm/drm_mode.h>
+#if defined(__cplusplus)
+}
+#endif
+
+#include "drm_mode.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
@@ -876,4 +898,8 @@ typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 4d8da699a623..a5890bf44c0a 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -26,6 +26,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
@@ -229,4 +233,8 @@
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* DRM_FOURCC_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index c0217434d28d..49a72659b801 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -29,6 +29,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
@@ -202,6 +206,7 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
+#define DRM_MODE_ENCODER_DPI 8
struct drm_mode_get_encoder {
__u32 encoder_id;
@@ -241,6 +246,7 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
+#define DRM_MODE_CONNECTOR_DPI 17
struct drm_mode_get_connector {
@@ -320,6 +326,16 @@ struct drm_mode_connector_set_property {
__u32 connector_id;
};
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
@@ -611,4 +627,8 @@ struct drm_mode_destroy_blob {
__u32 blob_id;
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h
index 1d1a858a203d..a951ced60ebe 100644
--- a/include/uapi/drm/drm_sarea.h
+++ b/include/uapi/drm/drm_sarea.h
@@ -34,6 +34,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000U
@@ -83,4 +87,8 @@ typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;
#endif
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _DRM_SAREA_H_ */
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index f95e1c43c3fb..2584c1cca42f 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -19,6 +19,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints:
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
@@ -222,4 +226,8 @@ struct drm_etnaviv_gem_wait {
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* __ETNAVIV_DRM_H__ */
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 3947c2eb8d69..cb3e9f9d029f 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -17,6 +17,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/**
* User-desired buffer creation information structure.
*
@@ -362,4 +366,8 @@ struct drm_exynos_ipp_event {
__u32 buf_id[EXYNOS_DRM_OPS_MAX];
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _UAPI_EXYNOS_DRM_H_ */
diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h
index bdb028723ded..6e6cf86b75b0 100644
--- a/include/uapi/drm/i810_drm.h
+++ b/include/uapi/drm/i810_drm.h
@@ -3,6 +3,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
*/
@@ -280,4 +284,8 @@ typedef struct _drm_i810_mc {
unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _I810_DRM_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a5524cc95ff8..c17d63d8b543 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -29,6 +29,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
@@ -1170,4 +1174,8 @@ struct drm_i915_gem_context_param {
__u64 value;
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _UAPI_I915_DRM_H_ */
diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h
index fca817009e13..8c4337548ab5 100644
--- a/include/uapi/drm/mga_drm.h
+++ b/include/uapi/drm/mga_drm.h
@@ -37,6 +37,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
@@ -416,4 +420,8 @@ typedef struct drm_mga_getparam {
void __user *value;
} drm_mga_getparam_t;
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 254d3e92d18e..bf19d2cd9078 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -20,6 +20,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints:
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
@@ -217,4 +221,8 @@ struct drm_msm_wait_fence {
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* __MSM_DRM_H__ */
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 500d82aecbe4..259588a4b61b 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -27,7 +27,11 @@
#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
-#include <drm/drm.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
@@ -141,4 +145,8 @@ struct drm_nouveau_gem_cpu_fini {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 38a3bd847e15..407cb55df6ac 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -22,6 +22,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
@@ -114,4 +118,8 @@ struct drm_omap_gem_info {
#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* __OMAP_DRM_H__ */
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h
index 4d1e32640463..7eef42213051 100644
--- a/include/uapi/drm/qxl_drm.h
+++ b/include/uapi/drm/qxl_drm.h
@@ -26,6 +26,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*
@@ -84,7 +88,6 @@ struct drm_qxl_command {
__u32 pad;
};
-/* XXX: call it drm_qxl_commands? */
struct drm_qxl_execbuffer {
__u32 flags; /* for future use */
__u32 commands_num;
@@ -148,4 +151,8 @@ struct drm_qxl_alloc_surf {
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
struct drm_qxl_alloc_surf)
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h
index 7a44c6500a7e..690e9c62f510 100644
--- a/include/uapi/drm/r128_drm.h
+++ b/include/uapi/drm/r128_drm.h
@@ -35,6 +35,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/
@@ -325,4 +329,8 @@ typedef struct drm_r128_getparam {
void __user *value;
} drm_r128_getparam_t;
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index ccb9bcd82685..490a59cc4532 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -35,6 +35,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
@@ -1067,4 +1071,8 @@ struct drm_radeon_info {
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h
index 574147489c60..0f6eddef74aa 100644
--- a/include/uapi/drm/savage_drm.h
+++ b/include/uapi/drm/savage_drm.h
@@ -28,6 +28,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__
@@ -209,4 +213,8 @@ union drm_savage_cmd_header {
} clear1; /* SAVAGE_CMD_CLEAR data */
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/sis_drm.h b/include/uapi/drm/sis_drm.h
index 374858cdcdaa..3e3f7e989e0b 100644
--- a/include/uapi/drm/sis_drm.h
+++ b/include/uapi/drm/sis_drm.h
@@ -27,6 +27,12 @@
#ifndef __SIS_DRM_H__
#define __SIS_DRM_H__
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* SiS specific ioctls */
#define NOT_USED_0_3
#define DRM_SIS_FB_ALLOC 0x04
@@ -64,4 +70,8 @@ typedef struct {
unsigned long offset, size;
} drm_sis_fb_t;
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* __SIS_DRM_H__ */
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 27d0b054aed0..d954f8c33321 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -25,6 +25,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@@ -198,4 +202,8 @@ struct drm_tegra_gem_get_flags {
#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index eeb37e394f13..af12e8a184c8 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -26,6 +26,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_VC4_SUBMIT_CL 0x00
#define DRM_VC4_WAIT_SEQNO 0x01
#define DRM_VC4_WAIT_BO 0x02
@@ -276,4 +280,8 @@ struct drm_vc4_get_hang_state {
__u32 pad[16];
};
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _UAPI_VC4_DRM_H_ */
diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h
index fa21ed185520..a1e125d42208 100644
--- a/include/uapi/drm/via_drm.h
+++ b/include/uapi/drm/via_drm.h
@@ -26,6 +26,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
*/
@@ -271,4 +275,8 @@ typedef struct drm_via_dmablit {
drm_via_blitsync_t sync;
} drm_via_dmablit_t;
+#if defined(__cplusplus)
+}
+#endif
+
#endif /* _VIA_DRM_H_ */
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c74f1f90cb37..91a31ffed828 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -26,6 +26,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*
@@ -163,4 +167,8 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 5b68b4d10884..d325a4107916 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -30,6 +30,10 @@
#include "drm.h"
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
@@ -1087,4 +1091,9 @@ union drm_vmw_extended_context_arg {
enum drm_vmw_extended_context req;
struct drm_vmw_context_arg rep;
};
+
+#if defined(__cplusplus)
+}
+#endif
+
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 813ffb2e22c9..8bdae34d1f9a 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -141,6 +141,7 @@ header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
header-y += gpio.h
header-y += gsmmux.h
+header-y += gtp.h
header-y += hdlcdrv.h
header-y += hdlc.h
header-y += hdreg.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 23917bb47bf3..406459b935a2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -92,6 +92,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_KPROBE,
BPF_PROG_TYPE_SCHED_CLS,
BPF_PROG_TYPE_SCHED_ACT,
+ BPF_PROG_TYPE_TRACEPOINT,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -346,6 +347,10 @@ enum bpf_func_id {
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
+/* BPF_FUNC_perf_event_output flags. */
+#define BPF_F_INDEX_MASK 0xffffffffULL
+#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -365,6 +370,8 @@ struct __sk_buff {
__u32 cb[5];
__u32 hash;
__u32 tc_classid;
+ __u32 data;
+ __u32 data_end;
};
struct bpf_tunnel_key {
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index dea893199257..23c6960e94a4 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -23,6 +23,7 @@
#define BTRFS_IOCTL_MAGIC 0x94
#define BTRFS_VOL_NAME_MAX 255
+#define BTRFS_LABEL_SIZE 256
/* this should be 4k */
#define BTRFS_PATH_NAME_MAX 4087
@@ -33,14 +34,31 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_DEVICE_PATH_NAME_MAX 1024
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
+#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
+
+#define BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED \
+ (BTRFS_SUBVOL_CREATE_ASYNC | \
+ BTRFS_SUBVOL_RDONLY | \
+ BTRFS_SUBVOL_QGROUP_INHERIT | \
+ BTRFS_DEVICE_SPEC_BY_ID)
+
#define BTRFS_FSID_SIZE 16
#define BTRFS_UUID_SIZE 16
#define BTRFS_UUID_UNPARSED_SIZE 37
-#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+/*
+ * flags definition for qgroup limits
+ *
+ * Used by:
+ * struct btrfs_qgroup_limit.flags
+ * struct btrfs_qgroup_limit_item.flags
+ */
+#define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0)
+#define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1)
+#define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2)
+#define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3)
+#define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4)
+#define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5)
struct btrfs_qgroup_limit {
__u64 flags;
@@ -50,6 +68,14 @@ struct btrfs_qgroup_limit {
__u64 rsv_excl;
};
+/*
+ * flags definition for qgroup inheritance
+ *
+ * Used by:
+ * struct btrfs_qgroup_inherit.flags
+ */
+#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+
struct btrfs_qgroup_inherit {
__u64 flags;
__u64 num_qgroups;
@@ -64,6 +90,20 @@ struct btrfs_ioctl_qgroup_limit_args {
struct btrfs_qgroup_limit lim;
};
+/*
+ * flags for subvolumes
+ *
+ * Used by:
+ * struct btrfs_ioctl_vol_args_v2.flags
+ *
+ * BTRFS_SUBVOL_RDONLY is also provided/consumed by the following ioctls:
+ * - BTRFS_IOC_SUBVOL_GETFLAGS
+ * - BTRFS_IOC_SUBVOL_SETFLAGS
+ */
+#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
+
#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
@@ -76,7 +116,10 @@ struct btrfs_ioctl_vol_args_v2 {
};
__u64 unused[4];
};
- char name[BTRFS_SUBVOL_NAME_MAX + 1];
+ union {
+ char name[BTRFS_SUBVOL_NAME_MAX + 1];
+ u64 devid;
+ };
};
/*
@@ -190,6 +233,37 @@ struct btrfs_ioctl_fs_info_args {
__u64 reserved[122]; /* pad to 1k */
};
+/*
+ * feature flags
+ *
+ * Used by:
+ * struct btrfs_ioctl_feature_flags
+ */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+
+#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
+#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
+#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
+/*
+ * Some patches floated around with a second compression method;
+ * let's save that incompat here for when they do get in.
+ * Note we don't actually support it, we're just reserving the
+ * number.
+ */
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
+
+/*
+ * Older kernels tried to do bigger metadata blocks, but the
+ * code was pretty buggy. Let's not let them try anymore.
+ */
+#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
+
+#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
+#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
+#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
+
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
__u64 compat_ro_flags;
@@ -254,6 +328,70 @@ struct btrfs_balance_progress {
__u64 completed; /* # of chunks relocated so far */
};
+/*
+ * flags definition for balance
+ *
+ * Restriper's general type filter
+ *
+ * Used by:
+ * btrfs_ioctl_balance_args.flags
+ * btrfs_balance_control.flags (internal)
+ */
+#define BTRFS_BALANCE_DATA (1ULL << 0)
+#define BTRFS_BALANCE_SYSTEM (1ULL << 1)
+#define BTRFS_BALANCE_METADATA (1ULL << 2)
+
+#define BTRFS_BALANCE_TYPE_MASK (BTRFS_BALANCE_DATA | \
+ BTRFS_BALANCE_SYSTEM | \
+ BTRFS_BALANCE_METADATA)
+
+#define BTRFS_BALANCE_FORCE (1ULL << 3)
+#define BTRFS_BALANCE_RESUME (1ULL << 4)
+
+/*
+ * flags definitions for per-type balance args
+ *
+ * Balance filters
+ *
+ * Used by:
+ * struct btrfs_balance_args
+ */
+#define BTRFS_BALANCE_ARGS_PROFILES (1ULL << 0)
+#define BTRFS_BALANCE_ARGS_USAGE (1ULL << 1)
+#define BTRFS_BALANCE_ARGS_DEVID (1ULL << 2)
+#define BTRFS_BALANCE_ARGS_DRANGE (1ULL << 3)
+#define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
+#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
+#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6)
+#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7)
+#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10)
+
+#define BTRFS_BALANCE_ARGS_MASK \
+ (BTRFS_BALANCE_ARGS_PROFILES | \
+ BTRFS_BALANCE_ARGS_USAGE | \
+ BTRFS_BALANCE_ARGS_DEVID | \
+ BTRFS_BALANCE_ARGS_DRANGE | \
+ BTRFS_BALANCE_ARGS_VRANGE | \
+ BTRFS_BALANCE_ARGS_LIMIT | \
+ BTRFS_BALANCE_ARGS_LIMIT_RANGE | \
+ BTRFS_BALANCE_ARGS_STRIPES_RANGE | \
+ BTRFS_BALANCE_ARGS_USAGE_RANGE)
+
+/*
+ * Profile changing flags. When SOFT is set we won't relocate chunk if
+ * it already has the target profile (even though it may be
+ * half-filled).
+ */
+#define BTRFS_BALANCE_ARGS_CONVERT (1ULL << 8)
+#define BTRFS_BALANCE_ARGS_SOFT (1ULL << 9)
+
+
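To make the filter flags concrete, here is a sketch of a data-only balance restricted to chunks that are at most half full; the BTRFS_IOC_BALANCE_V2 ioctl and struct btrfs_ioctl_balance_args are assumed from <linux/btrfs.h> rather than shown in this hunk:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch: balance only data chunks whose usage is at most 50%. */
static int balance_data_usage_50(int fd)
{
        struct btrfs_ioctl_balance_args args;

        memset(&args, 0, sizeof(args));
        args.flags = BTRFS_BALANCE_DATA;                /* type filter */
        args.data.flags = BTRFS_BALANCE_ARGS_USAGE;     /* enable usage filter */
        args.data.usage = 50;                           /* single-value '0..N' form */
        return ioctl(fd, BTRFS_IOC_BALANCE_V2, &args);
}

The single-value usage form corresponds to the '0..N' meaning documented for struct btrfs_disk_balance_args later in this patch; BTRFS_BALANCE_ARGS_USAGE_RANGE would use usage_min/usage_max instead.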
+/*
+ * flags definition for balance state
+ *
+ * Used by:
+ * struct btrfs_ioctl_balance_args.state
+ */
#define BTRFS_BALANCE_STATE_RUNNING (1ULL << 0)
#define BTRFS_BALANCE_STATE_PAUSE_REQ (1ULL << 1)
#define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2)
@@ -347,9 +485,45 @@ struct btrfs_ioctl_clone_range_args {
__u64 dest_offset;
};
-/* flags for the defrag range ioctl */
+/*
+ * flags definition for the defrag range ioctl
+ *
+ * Used by:
+ * struct btrfs_ioctl_defrag_range_args.flags
+ */
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
+struct btrfs_ioctl_defrag_range_args {
+ /* start of the defrag operation */
+ __u64 start;
+
+ /* number of bytes to defrag, use (u64)-1 to say all */
+ __u64 len;
+
+ /*
+ * flags for the operation, which can include turning
+ * on compression for this one defrag
+ */
+ __u64 flags;
+
+ /*
+ * any extent bigger than this will be considered
+ * already defragged. Use 0 to take the kernel default.
+ * Use 1 to say every single extent must be rewritten.
+ */
+ __u32 extent_thresh;
+
+ /*
+ * which compression method to use if turning on compression
+ * for this defrag operation. If unspecified, zlib will
+ * be used
+ */
+ __u32 compress_type;
+
+ /* spare for later */
+ __u32 unused[4];
+};
+
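A sketch of how the structure above is typically filled; the BTRFS_IOC_DEFRAG_RANGE ioctl and the zlib compress_type value (1) are assumptions taken from the in-kernel definitions, not from this hunk:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

/* Sketch: defragment a whole file and recompress it on the way. */
static int defrag_and_compress(int fd)
{
        struct btrfs_ioctl_defrag_range_args range;

        memset(&range, 0, sizeof(range));
        range.start = 0;
        range.len = (__u64)-1;                  /* whole file */
        range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
        range.compress_type = 1;                /* assumed: zlib */
        return ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
}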
#define BTRFS_SAME_DATA_DIFFERS 1
/* For extent-same ioctl */
@@ -659,5 +833,7 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
struct btrfs_ioctl_feature_flags[2])
#define BTRFS_IOC_GET_SUPPORTED_FEATURES _IOR(BTRFS_IOCTL_MAGIC, 57, \
struct btrfs_ioctl_feature_flags[3])
+#define BTRFS_IOC_RM_DEV_V2 _IOW(BTRFS_IOCTL_MAGIC, 58, \
+ struct btrfs_ioctl_vol_args_v2)
#endif /* _UAPI_LINUX_BTRFS_H */
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
new file mode 100644
index 000000000000..d5ad15a106a7
--- /dev/null
+++ b/include/uapi/linux/btrfs_tree.h
@@ -0,0 +1,966 @@
+#ifndef _BTRFS_CTREE_H_
+#define _BTRFS_CTREE_H_
+
+/*
+ * This header contains the structure definitions and constants used
+ * by file system objects that can be retrieved using
+ * the BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that
+ * is needed to describe a leaf node's key or item contents.
+ */
+
+/* holds pointers to all of the tree roots */
+#define BTRFS_ROOT_TREE_OBJECTID 1ULL
+
+/* stores information about which extents are in use, and reference counts */
+#define BTRFS_EXTENT_TREE_OBJECTID 2ULL
+
+/*
+ * chunk tree stores translations from logical -> physical block numbering
+ * the super block points to the chunk tree
+ */
+#define BTRFS_CHUNK_TREE_OBJECTID 3ULL
+
+/*
+ * stores information about which areas of a given device are in use.
+ * one per device. The tree of tree roots points to the device tree
+ */
+#define BTRFS_DEV_TREE_OBJECTID 4ULL
+
+/* one per subvolume, storing files and directories */
+#define BTRFS_FS_TREE_OBJECTID 5ULL
+
+/* directory objectid inside the root tree */
+#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL
+
+/* holds checksums of all the data extents */
+#define BTRFS_CSUM_TREE_OBJECTID 7ULL
+
+/* holds quota configuration and tracking */
+#define BTRFS_QUOTA_TREE_OBJECTID 8ULL
+
+/* for storing items that use the BTRFS_UUID_KEY* types */
+#define BTRFS_UUID_TREE_OBJECTID 9ULL
+
+/* tracks free space in block groups. */
+#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL
+
+/* device stats in the device tree */
+#define BTRFS_DEV_STATS_OBJECTID 0ULL
+
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
+/* orphan objectid for tracking unlinked/truncated files */
+#define BTRFS_ORPHAN_OBJECTID -5ULL
+
+/* does write ahead logging to speed up fsyncs */
+#define BTRFS_TREE_LOG_OBJECTID -6ULL
+#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL
+
+/* for space balancing */
+#define BTRFS_TREE_RELOC_OBJECTID -8ULL
+#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL
+
+/*
+ * extent checksums all have this objectid
+ * this allows them to share the logging tree
+ * for fsyncs
+ */
+#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL
+
+/* For storing free space cache */
+#define BTRFS_FREE_SPACE_OBJECTID -11ULL
+
+/*
+ * The inode number assigned to the special inode for storing
+ * free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
+/* dummy objectid represents multiple objectids */
+#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
+
+/*
+ * All files have objectids in this range.
+ */
+#define BTRFS_FIRST_FREE_OBJECTID 256ULL
+#define BTRFS_LAST_FREE_OBJECTID -256ULL
+#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL
+
+
+/*
+ * the device items go into the chunk tree. The key is in the form
+ * [ 1 BTRFS_DEV_ITEM_KEY device_id ]
+ */
+#define BTRFS_DEV_ITEMS_OBJECTID 1ULL
+
+#define BTRFS_BTREE_INODE_OBJECTID 1
+
+#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
+
+#define BTRFS_DEV_REPLACE_DEVID 0ULL
+
+/*
+ * inode items have the data typically returned from stat and store other
+ * info about object characteristics. There is one for every file and dir in
+ * the FS
+ */
+#define BTRFS_INODE_ITEM_KEY 1
+#define BTRFS_INODE_REF_KEY 12
+#define BTRFS_INODE_EXTREF_KEY 13
+#define BTRFS_XATTR_ITEM_KEY 24
+#define BTRFS_ORPHAN_ITEM_KEY 48
+/* reserve 2-15 close to the inode for later flexibility */
+
+/*
+ * dir items are the name -> inode pointers in a directory. There is one
+ * for every name in a directory.
+ */
+#define BTRFS_DIR_LOG_ITEM_KEY 60
+#define BTRFS_DIR_LOG_INDEX_KEY 72
+#define BTRFS_DIR_ITEM_KEY 84
+#define BTRFS_DIR_INDEX_KEY 96
+/*
+ * extent data is for file data
+ */
+#define BTRFS_EXTENT_DATA_KEY 108
+
+/*
+ * extent csums are stored in a separate tree and hold csums for
+ * an entire extent on disk.
+ */
+#define BTRFS_EXTENT_CSUM_KEY 128
+
+/*
+ * root items point to tree roots. They are typically in the root
+ * tree used by the super block to find all the other trees
+ */
+#define BTRFS_ROOT_ITEM_KEY 132
+
+/*
+ * root backrefs tie subvols and snapshots to the directory entries that
+ * reference them
+ */
+#define BTRFS_ROOT_BACKREF_KEY 144
+
+/*
+ * root refs make a fast index for listing all of the snapshots and
+ * subvolumes referenced by a given root. They point directly to the
+ * directory item in the root that references the subvol
+ */
+#define BTRFS_ROOT_REF_KEY 156
+
+/*
+ * extent items are in the extent map tree. These record which blocks
+ * are used, and how many references there are to each block
+ */
+#define BTRFS_EXTENT_ITEM_KEY 168
+
+/*
+ * The same as the BTRFS_EXTENT_ITEM_KEY, except it's for metadata, where we
+ * already know the length, so we save the level in key->offset instead of the length.
+ */
+#define BTRFS_METADATA_ITEM_KEY 169
+
+#define BTRFS_TREE_BLOCK_REF_KEY 176
+
+#define BTRFS_EXTENT_DATA_REF_KEY 178
+
+#define BTRFS_EXTENT_REF_V0_KEY 180
+
+#define BTRFS_SHARED_BLOCK_REF_KEY 182
+
+#define BTRFS_SHARED_DATA_REF_KEY 184
+
+/*
+ * block groups give us hints into the extent allocation trees. Which
+ * blocks are free etc etc
+ */
+#define BTRFS_BLOCK_GROUP_ITEM_KEY 192
+
+/*
+ * Every block group is represented in the free space tree by a free space info
+ * item, which stores some accounting information. It is keyed on
+ * (block_group_start, FREE_SPACE_INFO, block_group_length).
+ */
+#define BTRFS_FREE_SPACE_INFO_KEY 198
+
+/*
+ * A free space extent tracks an extent of space that is free in a block group.
+ * It is keyed on (start, FREE_SPACE_EXTENT, length).
+ */
+#define BTRFS_FREE_SPACE_EXTENT_KEY 199
+
+/*
+ * When a block group becomes very fragmented, we convert it to use bitmaps
+ * instead of extents. A free space bitmap is keyed on
+ * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with
+ * (length / sectorsize) bits.
+ */
+#define BTRFS_FREE_SPACE_BITMAP_KEY 200
+
+#define BTRFS_DEV_EXTENT_KEY 204
+#define BTRFS_DEV_ITEM_KEY 216
+#define BTRFS_CHUNK_ITEM_KEY 228
+
+/*
+ * Records the overall state of the qgroups.
+ * There's only one instance of this key present,
+ * (0, BTRFS_QGROUP_STATUS_KEY, 0)
+ */
+#define BTRFS_QGROUP_STATUS_KEY 240
+/*
+ * Records the currently used space of the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_INFO_KEY 242
+/*
+ * Contains the user configured limits for the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_LIMIT_KEY 244
+/*
+ * Records the child-parent relationship of qgroups. For
+ * each relation, 2 keys are present:
+ * (childid, BTRFS_QGROUP_RELATION_KEY, parentid)
+ * (parentid, BTRFS_QGROUP_RELATION_KEY, childid)
+ */
+#define BTRFS_QGROUP_RELATION_KEY 246
+
+/*
+ * Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY.
+ */
+#define BTRFS_BALANCE_ITEM_KEY 248
+
+/*
+ * The key type for tree items that are stored persistently, but do not need to
+ * exist for an extended period of time. The items can exist in any tree.
+ *
+ * [subtype, BTRFS_TEMPORARY_ITEM_KEY, data]
+ *
+ * Existing items:
+ *
+ * - balance status item
+ * (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0)
+ */
+#define BTRFS_TEMPORARY_ITEM_KEY 248
+
+/*
+ * Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY
+ */
+#define BTRFS_DEV_STATS_KEY 249
+
+/*
+ * The key type for tree items that are stored persistently and usually exist
+ * for a long period, eg. filesystem lifetime. The item kinds can be status
+ * information, stats or preference values. The item can exist in any tree.
+ *
+ * [subtype, BTRFS_PERSISTENT_ITEM_KEY, data]
+ *
+ * Existing items:
+ *
+ * - device statistics, store IO stats in the device tree, one key for all
+ * stats
+ * (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0)
+ */
+#define BTRFS_PERSISTENT_ITEM_KEY 249
+
+/*
+ * Persistently stores the device replace state in the device tree.
+ * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0).
+ */
+#define BTRFS_DEV_REPLACE_KEY 250
+
+/*
+ * Stores items that allow UUIDs to be quickly mapped to something else.
+ * These items are part of the filesystem UUID tree.
+ * The key is built like this:
+ * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits).
+ */
+#if BTRFS_UUID_SIZE != 16
+#error "UUID items require BTRFS_UUID_SIZE == 16!"
+#endif
+#define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */
+#define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to
+ * received subvols */
+
+/*
+ * string items are for debugging. They just store a short string of
+ * data in the FS
+ */
+#define BTRFS_STRING_ITEM_KEY 253
+
+
+
+/* 32 bytes in various csum fields */
+#define BTRFS_CSUM_SIZE 32
+
+/* csum types */
+#define BTRFS_CSUM_TYPE_CRC32 0
+
+/*
+ * flags definitions for directory entry item type
+ *
+ * Used by:
+ * struct btrfs_dir_item.type
+ */
+#define BTRFS_FT_UNKNOWN 0
+#define BTRFS_FT_REG_FILE 1
+#define BTRFS_FT_DIR 2
+#define BTRFS_FT_CHRDEV 3
+#define BTRFS_FT_BLKDEV 4
+#define BTRFS_FT_FIFO 5
+#define BTRFS_FT_SOCK 6
+#define BTRFS_FT_SYMLINK 7
+#define BTRFS_FT_XATTR 8
+#define BTRFS_FT_MAX 9
+
+/*
+ * The key defines the order in the tree, and so it also defines (optimal)
+ * block layout.
+ *
+ * objectid corresponds to the inode number.
+ *
+ * type tells us things about the object, and is a kind of stream selector.
+ * so for a given inode, keys with type of 1 might refer to the inode data,
+ * type of 2 may point to file data in the btree and type == 3 may point to
+ * extents.
+ *
+ * offset is the starting byte offset for this key in the stream.
+ *
+ * btrfs_disk_key is in disk byte order. struct btrfs_key is always
+ * in cpu native order. Otherwise they are identical and their sizes
+ * should be the same (ie both packed)
+ */
+struct btrfs_disk_key {
+ __le64 objectid;
+ __u8 type;
+ __le64 offset;
+} __attribute__ ((__packed__));
+
+struct btrfs_key {
+ __u64 objectid;
+ __u8 type;
+ __u64 offset;
+} __attribute__ ((__packed__));
+
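Because btrfs_disk_key is little-endian while btrfs_key is CPU order, code that walks raw leaves converts between the two; a minimal userspace sketch with the two structures above in scope, using the glibc <endian.h> helpers (any little-endian conversion would do):

#include <endian.h>

/* Sketch: convert an on-disk (little-endian) key to CPU byte order. */
static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
                                         const struct btrfs_disk_key *disk)
{
        cpu->objectid = le64toh(disk->objectid);
        cpu->type = disk->type;
        cpu->offset = le64toh(disk->offset);
}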
+struct btrfs_dev_item {
+ /* the internal btrfs device id */
+ __le64 devid;
+
+ /* size of the device */
+ __le64 total_bytes;
+
+ /* bytes used */
+ __le64 bytes_used;
+
+ /* optimal io alignment for this device */
+ __le32 io_align;
+
+ /* optimal io width for this device */
+ __le32 io_width;
+
+ /* minimal io size for this device */
+ __le32 sector_size;
+
+ /* type and info about this device */
+ __le64 type;
+
+ /* expected generation for this device */
+ __le64 generation;
+
+ /*
+ * starting byte of this partition on the device,
+ * to allow for stripe alignment in the future
+ */
+ __le64 start_offset;
+
+ /* grouping information for allocation decisions */
+ __le32 dev_group;
+
+ /* seek speed 0-100 where 100 is fastest */
+ __u8 seek_speed;
+
+ /* bandwidth 0-100 where 100 is fastest */
+ __u8 bandwidth;
+
+ /* btrfs generated uuid for this device */
+ __u8 uuid[BTRFS_UUID_SIZE];
+
+ /* uuid of FS who owns this device */
+ __u8 fsid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_stripe {
+ __le64 devid;
+ __le64 offset;
+ __u8 dev_uuid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_chunk {
+ /* size of this chunk in bytes */
+ __le64 length;
+
+ /* objectid of the root referencing this chunk */
+ __le64 owner;
+
+ __le64 stripe_len;
+ __le64 type;
+
+ /* optimal io alignment for this chunk */
+ __le32 io_align;
+
+ /* optimal io width for this chunk */
+ __le32 io_width;
+
+ /* minimal io size for this chunk */
+ __le32 sector_size;
+
+ /* 2^16 stripes is quite a lot, a second limit is the size of a single
+ * item in the btree
+ */
+ __le16 num_stripes;
+
+ /* sub stripes only matter for raid10 */
+ __le16 sub_stripes;
+ struct btrfs_stripe stripe;
+ /* additional stripes go here */
+} __attribute__ ((__packed__));
+
+#define BTRFS_FREE_SPACE_EXTENT 1
+#define BTRFS_FREE_SPACE_BITMAP 2
+
+struct btrfs_free_space_entry {
+ __le64 offset;
+ __le64 bytes;
+ __u8 type;
+} __attribute__ ((__packed__));
+
+struct btrfs_free_space_header {
+ struct btrfs_disk_key location;
+ __le64 generation;
+ __le64 num_entries;
+ __le64 num_bitmaps;
+} __attribute__ ((__packed__));
+
+#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
+#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
+
+/* Super block flags */
+/* Errors detected */
+#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2)
+
+#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
+#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
+
+
+/*
+ * items in the extent btree are used to record the objectid of the
+ * owner of the block and the number of references
+ */
+
+struct btrfs_extent_item {
+ __le64 refs;
+ __le64 generation;
+ __le64 flags;
+} __attribute__ ((__packed__));
+
+struct btrfs_extent_item_v0 {
+ __le32 refs;
+} __attribute__ ((__packed__));
+
+
+#define BTRFS_EXTENT_FLAG_DATA (1ULL << 0)
+#define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1)
+
+/* following flags only apply to tree blocks */
+
+/* use full backrefs for extent pointers in the block */
+#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8)
+
+/*
+ * this flag is only used internally by scrub and may be changed at any time;
+ * it is only declared here to avoid collisions
+ */
+#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48)
+
+struct btrfs_tree_block_info {
+ struct btrfs_disk_key key;
+ __u8 level;
+} __attribute__ ((__packed__));
+
+struct btrfs_extent_data_ref {
+ __le64 root;
+ __le64 objectid;
+ __le64 offset;
+ __le32 count;
+} __attribute__ ((__packed__));
+
+struct btrfs_shared_data_ref {
+ __le32 count;
+} __attribute__ ((__packed__));
+
+struct btrfs_extent_inline_ref {
+ __u8 type;
+ __le64 offset;
+} __attribute__ ((__packed__));
+
+/* old style backrefs item */
+struct btrfs_extent_ref_v0 {
+ __le64 root;
+ __le64 generation;
+ __le64 objectid;
+ __le32 count;
+} __attribute__ ((__packed__));
+
+
+/* dev extents record free space on individual devices. The owner
+ * field points back to the chunk allocation mapping tree that allocated
+ * the extent. The chunk tree uuid field is a way to double check the owner
+ */
+struct btrfs_dev_extent {
+ __le64 chunk_tree;
+ __le64 chunk_objectid;
+ __le64 chunk_offset;
+ __le64 length;
+ __u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_inode_ref {
+ __le64 index;
+ __le16 name_len;
+ /* name goes here */
+} __attribute__ ((__packed__));
+
+struct btrfs_inode_extref {
+ __le64 parent_objectid;
+ __le64 index;
+ __le16 name_len;
+ __u8 name[0];
+ /* name goes here */
+} __attribute__ ((__packed__));
+
+struct btrfs_timespec {
+ __le64 sec;
+ __le32 nsec;
+} __attribute__ ((__packed__));
+
+struct btrfs_inode_item {
+ /* nfs style generation number */
+ __le64 generation;
+ /* transid that last touched this inode */
+ __le64 transid;
+ __le64 size;
+ __le64 nbytes;
+ __le64 block_group;
+ __le32 nlink;
+ __le32 uid;
+ __le32 gid;
+ __le32 mode;
+ __le64 rdev;
+ __le64 flags;
+
+ /* modification sequence number for NFS */
+ __le64 sequence;
+
+ /*
+ * a little future expansion, for more than this we can
+ * just grow the inode item and version it
+ */
+ __le64 reserved[4];
+ struct btrfs_timespec atime;
+ struct btrfs_timespec ctime;
+ struct btrfs_timespec mtime;
+ struct btrfs_timespec otime;
+} __attribute__ ((__packed__));
+
+struct btrfs_dir_log_item {
+ __le64 end;
+} __attribute__ ((__packed__));
+
+struct btrfs_dir_item {
+ struct btrfs_disk_key location;
+ __le64 transid;
+ __le16 data_len;
+ __le16 name_len;
+ __u8 type;
+} __attribute__ ((__packed__));
+
+#define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0)
+
+/*
+ * Internal in-memory flag that a subvolume has been marked for deletion but
+ * still visible as a directory
+ */
+#define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48)
+
+struct btrfs_root_item {
+ struct btrfs_inode_item inode;
+ __le64 generation;
+ __le64 root_dirid;
+ __le64 bytenr;
+ __le64 byte_limit;
+ __le64 bytes_used;
+ __le64 last_snapshot;
+ __le64 flags;
+ __le32 refs;
+ struct btrfs_disk_key drop_progress;
+ __u8 drop_level;
+ __u8 level;
+
+ /*
+ * The following fields appear after subvol_uuids+subvol_times
+ * were introduced.
+ */
+
+ /*
+ * This generation number is used to test if the new fields are valid
+ * and up to date while reading the root item. Every time the root item
+ * is written out, the "generation" field is copied into this field. If
+ * anyone ever mounted the fs with an older kernel, we will have
+ * mismatching generation values here and thus must invalidate the
+ * new fields. See btrfs_update_root and btrfs_find_last_root for
+ * details.
+ * the offset of generation_v2 is also used as the start for the memset
+ * when invalidating the fields.
+ */
+ __le64 generation_v2;
+ __u8 uuid[BTRFS_UUID_SIZE];
+ __u8 parent_uuid[BTRFS_UUID_SIZE];
+ __u8 received_uuid[BTRFS_UUID_SIZE];
+ __le64 ctransid; /* updated when an inode changes */
+ __le64 otransid; /* trans when created */
+ __le64 stransid; /* trans when sent. non-zero for received subvol */
+ __le64 rtransid; /* trans when received. non-zero for received subvol */
+ struct btrfs_timespec ctime;
+ struct btrfs_timespec otime;
+ struct btrfs_timespec stime;
+ struct btrfs_timespec rtime;
+ __le64 reserved[8]; /* for future */
+} __attribute__ ((__packed__));
+
+/*
+ * this is used for both forward and backward root refs
+ */
+struct btrfs_root_ref {
+ __le64 dirid;
+ __le64 sequence;
+ __le16 name_len;
+} __attribute__ ((__packed__));
+
+struct btrfs_disk_balance_args {
+ /*
+ * profiles to operate on, single is denoted by
+ * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+ */
+ __le64 profiles;
+
+ /*
+ * usage filter
+ * BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N'
+ * BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max
+ */
+ union {
+ __le64 usage;
+ struct {
+ __le32 usage_min;
+ __le32 usage_max;
+ };
+ };
+
+ /* devid filter */
+ __le64 devid;
+
+ /* devid subset filter [pstart..pend) */
+ __le64 pstart;
+ __le64 pend;
+
+ /* btrfs virtual address space subset filter [vstart..vend) */
+ __le64 vstart;
+ __le64 vend;
+
+ /*
+ * profile to convert to, single is denoted by
+ * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+ */
+ __le64 target;
+
+ /* BTRFS_BALANCE_ARGS_* */
+ __le64 flags;
+
+ /*
+ * BTRFS_BALANCE_ARGS_LIMIT with value 'limit'
+ * BTRFS_BALANCE_ARGS_LIMIT_RANGE - the extended version can use minimum
+ * and maximum
+ */
+ union {
+ __le64 limit;
+ struct {
+ __le32 limit_min;
+ __le32 limit_max;
+ };
+ };
+
+ /*
+ * Process chunks that cross stripes_min..stripes_max devices,
+ * BTRFS_BALANCE_ARGS_STRIPES_RANGE
+ */
+ __le32 stripes_min;
+ __le32 stripes_max;
+
+ __le64 unused[6];
+} __attribute__ ((__packed__));
+
+/*
+ * store balance parameters to disk so that balance can be properly
+ * resumed after crash or unmount
+ */
+struct btrfs_balance_item {
+ /* BTRFS_BALANCE_* */
+ __le64 flags;
+
+ struct btrfs_disk_balance_args data;
+ struct btrfs_disk_balance_args meta;
+ struct btrfs_disk_balance_args sys;
+
+ __le64 unused[4];
+} __attribute__ ((__packed__));
+
+#define BTRFS_FILE_EXTENT_INLINE 0
+#define BTRFS_FILE_EXTENT_REG 1
+#define BTRFS_FILE_EXTENT_PREALLOC 2
+
+struct btrfs_file_extent_item {
+ /*
+ * transaction id that created this extent
+ */
+ __le64 generation;
+ /*
+ * max number of bytes to hold this extent in ram
+ * when we split a compressed extent we can't know how big
+ * each of the resulting pieces will be. So, this is
+ * an upper limit on the size of the extent in ram instead of
+ * an exact limit.
+ */
+ __le64 ram_bytes;
+
+ /*
+ * 32 bits for the various ways we might encode the data,
+ * including compression and encryption. If any of these
+ * are set to something a given disk format doesn't understand,
+ * it is treated like an incompat flag for reading and writing,
+ * but not for stat.
+ */
+ __u8 compression;
+ __u8 encryption;
+ __le16 other_encoding; /* spare for later use */
+
+ /* are we inline data or a real extent? */
+ __u8 type;
+
+ /*
+ * disk space consumed by the extent, checksum blocks are included
+ * in these numbers
+ *
+ * At this offset in the structure, the inline extent data start.
+ */
+ __le64 disk_bytenr;
+ __le64 disk_num_bytes;
+ /*
+ * the logical offset in file blocks (no csums)
+ * this extent record is for. This allows a file extent to point
+ * into the middle of an existing extent on disk, sharing it
+ * between two snapshots (useful if some bytes in the middle of the
+ * extent have changed).
+ */
+ __le64 offset;
+ /*
+ * the logical number of file blocks (no csums included). This
+ * always reflects the size uncompressed and without encoding.
+ */
+ __le64 num_bytes;
+
+} __attribute__ ((__packed__));
+
+struct btrfs_csum_item {
+ __u8 csum;
+} __attribute__ ((__packed__));
+
+struct btrfs_dev_stats_item {
+ /*
+ * grow this item struct at the end for future enhancements and keep
+ * the existing values unchanged
+ */
+ __le64 values[BTRFS_DEV_STAT_VALUES_MAX];
+} __attribute__ ((__packed__));
+
+#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0
+#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1
+#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0
+#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1
+#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2
+#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3
+#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4
+
+struct btrfs_dev_replace_item {
+ /*
+ * grow this item struct at the end for future enhancements and keep
+ * the existing values unchanged
+ */
+ __le64 src_devid;
+ __le64 cursor_left;
+ __le64 cursor_right;
+ __le64 cont_reading_from_srcdev_mode;
+
+ __le64 replace_state;
+ __le64 time_started;
+ __le64 time_stopped;
+ __le64 num_write_errors;
+ __le64 num_uncorrectable_read_errors;
+} __attribute__ ((__packed__));
+
+/* different types of block groups (and chunks) */
+#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0)
+#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1)
+#define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2)
+#define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3)
+#define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4)
+#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5)
+#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
+#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
+#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
+ BTRFS_SPACE_INFO_GLOBAL_RSV)
+
+enum btrfs_raid_types {
+ BTRFS_RAID_RAID10,
+ BTRFS_RAID_RAID1,
+ BTRFS_RAID_DUP,
+ BTRFS_RAID_RAID0,
+ BTRFS_RAID_SINGLE,
+ BTRFS_RAID_RAID5,
+ BTRFS_RAID_RAID6,
+ BTRFS_NR_RAID_TYPES
+};
+
+#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
+ BTRFS_BLOCK_GROUP_SYSTEM | \
+ BTRFS_BLOCK_GROUP_METADATA)
+
+#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
+ BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID5 | \
+ BTRFS_BLOCK_GROUP_RAID6 | \
+ BTRFS_BLOCK_GROUP_DUP | \
+ BTRFS_BLOCK_GROUP_RAID10)
+#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \
+ BTRFS_BLOCK_GROUP_RAID6)
+
+/*
+ * We need a bit for restriper to be able to tell when chunks of type
+ * SINGLE are available. This "extended" profile format is used in
+ * fs_info->avail_*_alloc_bits (in-memory) and balance item fields
+ * (on-disk). The corresponding on-disk bit in chunk.type is reserved
+ * to avoid remappings between two formats in future.
+ */
+#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48)
+
+/*
+ * A fake block group type that is used to communicate global block reserve
+ * size to userspace via the SPACE_INFO ioctl.
+ */
+#define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49)
+
+#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
+ BTRFS_AVAIL_ALLOC_BIT_SINGLE)
+
+static inline __u64 chunk_to_extended(__u64 flags)
+{
+ if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
+ flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+ return flags;
+}
+static inline __u64 extended_to_chunk(__u64 flags)
+{
+ return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+}
+
+struct btrfs_block_group_item {
+ __le64 used;
+ __le64 chunk_objectid;
+ __le64 flags;
+} __attribute__ ((__packed__));
+
+struct btrfs_free_space_info {
+ __le32 extent_count;
+ __le32 flags;
+} __attribute__ ((__packed__));
+
+#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
+
+#define BTRFS_QGROUP_LEVEL_SHIFT 48
+static inline __u64 btrfs_qgroup_level(__u64 qgroupid)
+{
+ return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT;
+}
+
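The level occupies the top 16 bits of the qgroup ID, which is why the helper above is a plain shift; the complementary helpers below are illustrative only (the names are not part of this header):

/* Sketch: compose/decompose a qgroupid of the familiar "level/id" form. */
static inline __u64 btrfs_qgroup_subvolid(__u64 qgroupid)
{
        return qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1);
}

static inline __u64 btrfs_make_qgroupid(__u16 level, __u64 id)
{
        return ((__u64)level << BTRFS_QGROUP_LEVEL_SHIFT) | id;
}

For example, the qgroup usually written as "1/100" is btrfs_make_qgroupid(1, 100).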
+/*
+ * is subvolume quota turned on?
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0)
+/*
+ * RESCAN is set during the initialization phase
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1)
+/*
+ * Some qgroup entries are known to be out of date,
+ * either because the configuration has changed in a way that
+ * makes a rescan necessary, or because the fs has been mounted
+ * with a non-qgroup-aware version.
+ * Turning quota off and on again makes it inconsistent, too.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+
+#define BTRFS_QGROUP_STATUS_VERSION 1
+
+struct btrfs_qgroup_status_item {
+ __le64 version;
+ /*
+ * the generation is updated during every commit. As older
+ * versions of btrfs are not aware of qgroups, it will be
+ * possible to detect inconsistencies by checking the
+ * generation at mount time
+ */
+ __le64 generation;
+
+ /* flag definitions see above */
+ __le64 flags;
+
+ /*
+ * only used during scanning to record the progress
+ * of the scan. It contains a logical address
+ */
+ __le64 rescan;
+} __attribute__ ((__packed__));
+
+struct btrfs_qgroup_info_item {
+ __le64 generation;
+ __le64 rfer;
+ __le64 rfer_cmpr;
+ __le64 excl;
+ __le64 excl_cmpr;
+} __attribute__ ((__packed__));
+
+struct btrfs_qgroup_limit_item {
+ /*
+ * only updated when any of the other values change
+ */
+ __le64 flags;
+ __le64 max_rfer;
+ __le64 max_excl;
+ __le64 rsv_rfer;
+ __le64 rsv_excl;
+} __attribute__ ((__packed__));
+
+#endif /* _BTRFS_CTREE_H_ */
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
new file mode 100644
index 000000000000..7e4272cf1fb2
--- /dev/null
+++ b/include/uapi/linux/coresight-stm.h
@@ -0,0 +1,21 @@
+#ifndef __UAPI_CORESIGHT_STM_H_
+#define __UAPI_CORESIGHT_STM_H_
+
+#define STM_FLAG_TIMESTAMPED BIT(3)
+#define STM_FLAG_GUARANTEED BIT(7)
+
+/*
+ * The CoreSight STM supports guaranteed and invariant timing
+ * transactions. Guaranteed transactions are guaranteed to be
+ * traced; this might involve stalling the bus or system to
+ * ensure the transaction is accepted by the STM. Invariant
+ * timing transactions are not guaranteed to be traced, but they
+ * will take an invariant amount of time regardless of the
+ * state of the STM.
+ */
+enum {
+ STM_OPTION_GUARANTEED = 0,
+ STM_OPTION_INVARIANT,
+};
+
+#endif
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index c9fee5781eb1..ba0073b26fa6 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -33,6 +33,30 @@ enum devlink_command {
DEVLINK_CMD_PORT_SPLIT,
DEVLINK_CMD_PORT_UNSPLIT,
+ DEVLINK_CMD_SB_GET, /* can dump */
+ DEVLINK_CMD_SB_SET,
+ DEVLINK_CMD_SB_NEW,
+ DEVLINK_CMD_SB_DEL,
+
+ DEVLINK_CMD_SB_POOL_GET, /* can dump */
+ DEVLINK_CMD_SB_POOL_SET,
+ DEVLINK_CMD_SB_POOL_NEW,
+ DEVLINK_CMD_SB_POOL_DEL,
+
+ DEVLINK_CMD_SB_PORT_POOL_GET, /* can dump */
+ DEVLINK_CMD_SB_PORT_POOL_SET,
+ DEVLINK_CMD_SB_PORT_POOL_NEW,
+ DEVLINK_CMD_SB_PORT_POOL_DEL,
+
+ DEVLINK_CMD_SB_TC_POOL_BIND_GET, /* can dump */
+ DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+ DEVLINK_CMD_SB_TC_POOL_BIND_DEL,
+
+ /* Shared buffer occupancy monitoring commands */
+ DEVLINK_CMD_SB_OCC_SNAPSHOT,
+ DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
@@ -46,6 +70,31 @@ enum devlink_port_type {
DEVLINK_PORT_TYPE_IB,
};
+enum devlink_sb_pool_type {
+ DEVLINK_SB_POOL_TYPE_INGRESS,
+ DEVLINK_SB_POOL_TYPE_EGRESS,
+};
+
+/* static threshold - limiting the maximum number of bytes.
+ * dynamic threshold - limiting the maximum number of bytes
+ * based on the currently available free space in the shared buffer pool.
+ * In this mode, the maximum quota is calculated based
+ * on the following formula:
+ * max_quota = alpha / (1 + alpha) * Free_Buffer
+ * where Free_Buffer is the amount of non-occupied buffer associated with
+ * the relevant pool.
+ * The value range that can be passed is 0-20 and serves
+ * for computation of alpha by the following formula:
+ * alpha = 2 ^ (passed_value - 10)
+ */
+
+enum devlink_sb_threshold_type {
+ DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC,
+};
+
+#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
+
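A small worked example of the dynamic-threshold formula documented above; thr is the 0..DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX value carried in DEVLINK_ATTR_SB_THRESHOLD and free_buffer is the non-occupied part of the pool, in bytes:

#include <math.h>

/* Sketch: evaluate max_quota = alpha / (1 + alpha) * Free_Buffer. */
static double devlink_dynamic_quota(unsigned int thr, double free_buffer)
{
        double alpha = ldexp(1.0, (int)thr - 10);       /* 2 ^ (thr - 10) */

        return alpha / (1.0 + alpha) * free_buffer;
}

A threshold of 10 gives alpha = 1, i.e. half of the currently free buffer; 20 allows nearly all of it and 0 only a small fraction.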
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -62,6 +111,20 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */
DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */
+ DEVLINK_ATTR_SB_INDEX, /* u32 */
+ DEVLINK_ATTR_SB_SIZE, /* u32 */
+ DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_INGRESS_TC_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_EGRESS_TC_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_POOL_INDEX, /* u16 */
+ DEVLINK_ATTR_SB_POOL_TYPE, /* u8 */
+ DEVLINK_ATTR_SB_POOL_SIZE, /* u32 */
+ DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */
+ DEVLINK_ATTR_SB_THRESHOLD, /* u32 */
+ DEVLINK_ATTR_SB_TC_INDEX, /* u16 */
+ DEVLINK_ATTR_SB_OCC_CUR, /* u32 */
+ DEVLINK_ATTR_SB_OCC_MAX, /* u32 */
/* add new attributes above here, update the policy in devlink.c */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 71e1d0ed92f7..cb4a72f888d5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -282,16 +282,18 @@ typedef struct elf64_phdr {
#define SHT_HIUSER 0xffffffff
/* sh_flags */
-#define SHF_WRITE 0x1
-#define SHF_ALLOC 0x2
-#define SHF_EXECINSTR 0x4
-#define SHF_MASKPROC 0xf0000000
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_RELA_LIVEPATCH 0x00100000
+#define SHF_MASKPROC 0xf0000000
/* special section indexes */
#define SHN_UNDEF 0
#define SHN_LORESERVE 0xff00
#define SHN_LOPROC 0xff00
#define SHN_HIPROC 0xff1f
+#define SHN_LIVEPATCH 0xff20
#define SHN_ABS 0xfff1
#define SHN_COMMON 0xfff2
#define SHN_HIRESERVE 0xffff
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9222db8ccccc..5f030b46cff4 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 96161b8202b5..620c8a5ddc00 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -49,6 +49,7 @@ enum {
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
+ FRA_PAD,
__FRA_MAX
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index a079d50376e1..3b00f7c8943f 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -222,7 +222,6 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXGET _IO(0x12,129)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -324,5 +323,7 @@ struct fscrypt_policy {
/* flags for preadv2/pwritev2: */
#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
+#define RWF_DSYNC 0x00000002 /* per-IO O_DSYNC */
+#define RWF_SYNC 0x00000004 /* per-IO O_SYNC */
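A sketch of the new per-IO flags in use; it assumes a libc that already exposes the pwritev2() wrapper and the RWF_* values (otherwise a raw syscall would be needed):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Sketch: one write with per-IO O_DSYNC semantics via RWF_DSYNC. */
static ssize_t write_dsync(int fd, void *buf, size_t len, off_t off)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };

        return pwritev2(fd, &iov, 1, off, RWF_DSYNC);
}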
#endif /* _UAPI_LINUX_FS_H */
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 6487317ea619..52deccc2128e 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -10,6 +10,7 @@ enum {
TCA_STATS_QUEUE,
TCA_STATS_APP,
TCA_STATS_RATE_EST64,
+ TCA_STATS_PAD,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
new file mode 100644
index 000000000000..ca1054dd8249
--- /dev/null
+++ b/include/uapi/linux/gtp.h
@@ -0,0 +1,33 @@
+#ifndef _UAPI_LINUX_GTP_H_
+#define _UAPI_LINUX_GTP_H_
+
+enum gtp_genl_cmds {
+ GTP_CMD_NEWPDP,
+ GTP_CMD_DELPDP,
+ GTP_CMD_GETPDP,
+
+ GTP_CMD_MAX,
+};
+
+enum gtp_version {
+ GTP_V0 = 0,
+ GTP_V1,
+};
+
+enum gtp_attrs {
+ GTPA_UNSPEC = 0,
+ GTPA_LINK,
+ GTPA_VERSION,
+ GTPA_TID, /* for GTPv0 only */
+ GTPA_SGSN_ADDRESS,
+ GTPA_MS_ADDRESS,
+ GTPA_FLOW,
+ GTPA_NET_NS_FD,
+ GTPA_I_TEI, /* for GTPv1 only */
+ GTPA_O_TEI, /* for GTPv1 only */
+ GTPA_PAD,
+ __GTPA_MAX,
+};
+#define GTPA_MAX (__GTPA_MAX - 1)
+
+#endif /* _UAPI_LINUX_GTP_H_ */
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index b0a7dd61eb35..adcbef4bff61 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -68,14 +68,15 @@
struct i2c_msg {
__u16 addr; /* slave address */
__u16 flags;
-#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
-#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
-#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
+ /* I2C_M_RD is guaranteed to be 0x0001! */
+#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
+#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
+#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
__u16 len; /* msg length */
__u8 *buf; /* pointer to msg data */
};
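These flags end up in i2c_msg entries submitted through the I2C_RDWR ioctl; a minimal sketch of a register read that writes the register address and then reads one byte back with I2C_M_RD, assuming struct i2c_rdwr_ioctl_data and I2C_RDWR come from <linux/i2c-dev.h>:

#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

/* Sketch: fd is an open /dev/i2c-N descriptor. */
static int i2c_read_reg(int fd, __u16 addr, __u8 reg, __u8 *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        return ioctl(fd, I2C_RDWR, &xfer);
}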
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index f80277569f24..e601c8c3bdc7 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -19,14 +19,20 @@
#ifndef _LINUX_IF_H
#define _LINUX_IF_H
+#include <linux/libc-compat.h> /* for compatibility with glibc */
#include <linux/types.h> /* for "__kernel_caddr_t" et al */
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/compiler.h> /* for "__user" et al */
+#if __UAPI_DEF_IF_IFNAMSIZ
#define IFNAMSIZ 16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+ __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
*
@@ -68,6 +74,8 @@
* @IFF_ECHO: echo sent packets. Volatile.
*/
enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
IFF_UP = 1<<0, /* sysfs */
IFF_BROADCAST = 1<<1, /* volatile */
IFF_DEBUG = 1<<2, /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
IFF_PORTSEL = 1<<13, /* sysfs */
IFF_AUTOMEDIA = 1<<14, /* sysfs */
IFF_DYNAMIC = 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
IFF_LOWER_UP = 1<<16, /* volatile */
IFF_DORMANT = 1<<17, /* volatile */
IFF_ECHO = 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define IFF_UP IFF_UP
#define IFF_BROADCAST IFF_BROADCAST
#define IFF_DEBUG IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
#define IFF_PORTSEL IFF_PORTSEL
#define IFF_AUTOMEDIA IFF_AUTOMEDIA
#define IFF_DYNAMIC IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define IFF_LOWER_UP IFF_LOWER_UP
#define IFF_DORMANT IFF_DORMANT
#define IFF_ECHO IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
* being very small might be worth keeping for clean configuration.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
unsigned char port;
/* 3 bytes spare */
};
+#endif /* __UAPI_DEF_IF_IFMAP */
struct if_settings {
unsigned int type; /* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
* remainder may be interface specific.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
struct ifreq {
#define IFHWADDRLEN 6
union
@@ -223,6 +246,7 @@ struct ifreq {
struct if_settings ifru_settings;
} ifr_ifru;
};
+#endif /* __UAPI_DEF_IF_IFREQ */
#define ifr_name ifr_ifrn.ifrn_name /* interface name */
#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
@@ -249,6 +273,8 @@ struct ifreq {
* must know all networks accessible).
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
struct ifconf {
int ifc_len; /* size of buffer */
union {
@@ -256,6 +282,8 @@ struct ifconf {
struct ifreq __user *ifcu_req;
} ifc_ifcu;
};
+#endif /* __UAPI_DEF_IF_IFCONF */
+
#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 0536eefff9bf..397d503fdedb 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -134,6 +134,16 @@ struct bridge_vlan_info {
__u16 vid;
};
+struct bridge_vlan_xstats {
+ __u64 rx_bytes;
+ __u64 rx_packets;
+ __u64 tx_bytes;
+ __u64 tx_packets;
+ __u16 vid;
+ __u16 pad1;
+ __u32 pad2;
+};
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -233,4 +243,12 @@ enum {
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
+enum {
+ BRIDGE_XSTATS_UNSPEC,
+ BRIDGE_XSTATS_VLAN,
+ __BRIDGE_XSTATS_MAX
+};
+#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 4a93051c578c..cec849a239f6 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -92,6 +92,7 @@
#define ETH_P_TDLS 0x890D /* TDLS */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */
+#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */
#define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index c488066fb53a..bb36bd5675a7 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -155,6 +155,7 @@ enum {
IFLA_PROTO_DOWN,
IFLA_GSO_MAX_SEGS,
IFLA_GSO_MAX_SIZE,
+ IFLA_PAD,
__IFLA_MAX
};
@@ -270,6 +271,8 @@ enum {
IFLA_BR_NF_CALL_IP6TABLES,
IFLA_BR_NF_CALL_ARPTABLES,
IFLA_BR_VLAN_DEFAULT_PVID,
+ IFLA_BR_PAD,
+ IFLA_BR_VLAN_STATS_ENABLED,
__IFLA_BR_MAX,
};
@@ -312,6 +315,7 @@ enum {
IFLA_BRPORT_HOLD_TIMER,
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
+ IFLA_BRPORT_PAD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -431,6 +435,7 @@ enum {
IFLA_MACSEC_SCB,
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
+ IFLA_MACSEC_PAD,
__IFLA_MACSEC_MAX,
};
@@ -488,6 +493,7 @@ enum {
IFLA_VXLAN_REMCSUM_NOPARTIAL,
IFLA_VXLAN_COLLECT_METADATA,
IFLA_VXLAN_LABEL,
+ IFLA_VXLAN_GPE,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -515,6 +521,24 @@ enum {
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+/* PPP section */
+enum {
+ IFLA_PPP_UNSPEC,
+ IFLA_PPP_DEV_FD,
+ __IFLA_PPP_MAX
+};
+#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
+
+/* GTP section */
+enum {
+ IFLA_GTP_UNSPEC,
+ IFLA_GTP_FD0,
+ IFLA_GTP_FD1,
+ IFLA_GTP_PDP_HASHSIZE,
+ __IFLA_GTP_MAX,
+};
+#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
+
/* Bonding section */
enum {
@@ -664,6 +688,7 @@ enum {
IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
+ IFLA_VF_STATS_PAD,
__IFLA_VF_STATS_MAX,
};
@@ -774,9 +799,46 @@ enum {
IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
+ IFLA_HSR_VERSION, /* HSR version */
__IFLA_HSR_MAX,
};
#define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
+/* STATS section */
+
+struct if_stats_msg {
+ __u8 family;
+ __u8 pad1;
+ __u16 pad2;
+ __u32 ifindex;
+ __u32 filter_mask;
+};
+
+/* A stats attribute can be netdev specific or a global stat.
+ * For netdev stats, let's use the prefix IFLA_STATS_LINK_*
+ */
+enum {
+ IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
+ IFLA_STATS_LINK_64,
+ IFLA_STATS_LINK_XSTATS,
+ __IFLA_STATS_MAX,
+};
+
+#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1)
+
+#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
+
+/* These are embedded into IFLA_STATS_LINK_XSTATS:
+ * [IFLA_STATS_LINK_XSTATS]
+ * -> [LINK_XSTATS_TYPE_xxx]
+ * -> [rtnl link type specific attributes]
+ */
+enum {
+ LINK_XSTATS_TYPE_UNSPEC,
+ LINK_XSTATS_TYPE_BRIDGE,
+ __LINK_XSTATS_TYPE_MAX
+};
+#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+
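A sketch of how the new stats dump is requested over rtnetlink; the RTM_GETSTATS message type and the reply layout are assumptions based on the companion rtnetlink changes rather than anything defined in this header:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

/* Sketch: ask for IFLA_STATS_LINK_64 of one interface; parsing of the
 * RTM_NEWSTATS reply is left to the caller, who receives the socket. */
static int request_link_stats(int ifindex)
{
        struct {
                struct nlmsghdr nlh;
                struct if_stats_msg ifsm;
        } req;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
                return -1;
        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifsm));
        req.nlh.nlmsg_type = RTM_GETSTATS;
        req.nlh.nlmsg_flags = NLM_F_REQUEST;
        req.ifsm.family = AF_UNSPEC;
        req.ifsm.ifindex = ifindex;
        req.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64);
        if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* caller recv()s the reply and closes the socket */
}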
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e3e3e7..f7d4831a2cc7 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,10 @@
#define MACSEC_MAX_KEY_LEN 128
-#define DEFAULT_CIPHER_ID 0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+#define MACSEC_KEYID_LEN 16
+
+#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
#define MACSEC_MIN_ICV_LEN 8
#define MACSEC_MAX_ICV_LEN 32
@@ -55,6 +57,7 @@ enum macsec_secy_attrs {
MACSEC_SECY_ATTR_INC_SCI,
MACSEC_SECY_ATTR_ES,
MACSEC_SECY_ATTR_SCB,
+ MACSEC_SECY_ATTR_PAD,
__MACSEC_SECY_ATTR_END,
NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END,
MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1,
@@ -66,6 +69,7 @@ enum macsec_rxsc_attrs {
MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */
MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */
MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */
+ MACSEC_RXSC_ATTR_PAD,
__MACSEC_RXSC_ATTR_END,
NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END,
MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1,
@@ -77,8 +81,9 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
MACSEC_SA_ATTR_PN, /* config/dump, u32 */
MACSEC_SA_ATTR_KEY, /* config, data */
- MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */
+ MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */
MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */
+ MACSEC_SA_ATTR_PAD,
__MACSEC_SA_ATTR_END,
NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
@@ -110,6 +115,7 @@ enum macsec_rxsc_stats_attr {
MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ MACSEC_RXSC_STATS_ATTR_PAD,
__MACSEC_RXSC_STATS_ATTR_END,
NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END,
MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1,
@@ -137,6 +143,7 @@ enum macsec_txsc_stats_attr {
MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+ MACSEC_TXSC_STATS_ATTR_PAD,
__MACSEC_TXSC_STATS_ATTR_END,
NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END,
MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1,
@@ -153,6 +160,7 @@ enum macsec_secy_stats_attr {
MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+ MACSEC_SECY_STATS_ATTR_PAD,
__MACSEC_SECY_STATS_ATTR_END,
NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END,
MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1,
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index c077617f3304..b0916fc72cce 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -38,6 +38,7 @@ enum iio_chan_type {
IIO_CONCENTRATION,
IIO_RESISTANCE,
IIO_PH,
+ IIO_UVINDEX,
};
enum iio_modifier {
@@ -77,6 +78,7 @@ enum iio_modifier {
IIO_MOD_Q,
IIO_MOD_CO2,
IIO_MOD_VOC,
+ IIO_MOD_LIGHT_UV,
};
enum iio_event_type {
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index abde7bbd6f3b..948c0a91e11b 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -14,6 +14,8 @@ enum {
ILA_ATTR_LOCATOR_MATCH, /* u64 */
ILA_ATTR_IFINDEX, /* s32 */
ILA_ATTR_DIR, /* u32 */
+ ILA_ATTR_PAD,
+ ILA_ATTR_CSUM_MODE, /* u8 */
__ILA_ATTR_MAX,
};
@@ -34,4 +36,10 @@ enum {
#define ILA_DIR_IN (1 << 0)
#define ILA_DIR_OUT (1 << 1)
+enum {
+ ILA_CSUM_ADJUST_TRANSPORT,
+ ILA_CSUM_NEUTRAL_MAP,
+ ILA_CSUM_NO_ACTION,
+};
+
#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 68a1f71fde9f..a16643705669 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -113,9 +113,13 @@ enum {
INET_DIAG_DCTCPINFO,
INET_DIAG_PROTOCOL, /* response attribute only */
INET_DIAG_SKV6ONLY,
+ INET_DIAG_LOCALS,
+ INET_DIAG_PEERS,
+ INET_DIAG_PAD,
+ __INET_DIAG_MAX,
};
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX (__INET_DIAG_MAX - 1)
/* INET_DIAG_MEM */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 391395c06c7e..22d69894bc92 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -435,6 +435,7 @@ enum {
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
+ IPVS_STATS_ATTR_PAD,
__IPVS_STATS_ATTR_MAX,
};
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 840cb990abe2..86eddd6241f3 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -12,6 +12,8 @@
#ifndef _LINUX_KEYCTL_H
#define _LINUX_KEYCTL_H
+#include <linux/types.h>
+
/* special process keyring shortcut IDs */
#define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */
#define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */
@@ -57,5 +59,13 @@
#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
+#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */
+
+/* keyctl structures */
+struct keyctl_dh_params {
+ __s32 private;
+ __s32 prime;
+ __s32 base;
+};
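A sketch of how the structure is handed to the new operation; the raw keyctl() calling convention used here (params pointer, output buffer, buffer length, plus a reserved zero argument) is an assumption, and the three fields are key serial numbers holding the private exponent, the prime and the base:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

/* Sketch: the result of base^private mod prime is written to out. */
static long dh_compute(__s32 private_key, __s32 prime_key, __s32 base_key,
                       void *out, size_t outlen)
{
        struct keyctl_dh_params params = {
                .private = private_key,
                .prime   = prime_key,
                .base    = base_key,
        };

        /* assumed convention for the raw syscall; no glibc wrapper here */
        return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params, out, outlen, 0);
}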
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a7f1f8032ec1..05ebf475104c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -865,6 +865,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_SPAPR_TCE_64 125
#define KVM_CAP_ARM_PMU_V3 126
#define KVM_CAP_VCPU_ATTRIBUTES 127
+#define KVM_CAP_MAX_VCPU_ID 128
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 347ef22a964e..4bd27d0270a2 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -126,6 +126,7 @@ enum {
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* u8 */
L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* u8 */
+ L2TP_ATTR_PAD,
__L2TP_ATTR_MAX,
};
@@ -142,6 +143,7 @@ enum {
L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
+ L2TP_ATTR_STATS_PAD,
__L2TP_ATTR_STATS_MAX,
};
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 7d024ceb075d..e4f048ee7043 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,40 @@
/* We have included glibc headers... */
#if defined(__GLIBC__)
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H) && defined(__USE_MISC)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
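The point of these guards is that the glibc and kernel interface headers can now coexist in one translation unit; a trivial sketch of the previously conflicting include order:

/* Sketch: with the guards above this mix no longer trips over duplicate
 * definitions of struct ifreq, struct ifmap, IFNAMSIZ or the IFF_* flags. */
#include <net/if.h>     /* glibc definitions first */
#include <linux/if.h>   /* kernel UAPI header second */

int main(void)
{
        struct ifreq req;

        (void)req;      /* compiling both headers together is the whole point */
        return 0;
}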
/* Coordinate with glibc netinet/in.h header. */
#if defined(_NETINET_IN_H)
@@ -117,6 +151,16 @@
* that we need. */
#else /* !defined(__GLIBC__) */
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index f8b01887a495..a478fe80e203 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -22,6 +22,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TTL,
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
+ LWTUNNEL_IP_PAD,
__LWTUNNEL_IP_MAX,
};
@@ -35,6 +36,7 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_HOPLIMIT,
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
+ LWTUNNEL_IP6_PAD,
__LWTUNNEL_IP6_MAX,
};
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 0de181ad73d5..546b38886e11 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -78,5 +78,7 @@
#define BTRFS_TEST_MAGIC 0x73727279
#define NSFS_MAGIC 0x6e736673
#define BPF_FS_MAGIC 0xcafe4a11
+/* Since UDF 2.01 is ISO 13346 based... */
+#define UDF_SUPER_MAGIC 0x15013346
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7cc28ab05b87..309915f74492 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, Intel Corporation.
+ * Copyright (c) 2014-2016, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU Lesser General Public License,
@@ -20,11 +20,45 @@ struct nd_cmd_smart {
__u8 data[128];
} __packed;
+#define ND_SMART_HEALTH_VALID (1 << 0)
+#define ND_SMART_TEMP_VALID (1 << 1)
+#define ND_SMART_SPARES_VALID (1 << 2)
+#define ND_SMART_ALARM_VALID (1 << 3)
+#define ND_SMART_USED_VALID (1 << 4)
+#define ND_SMART_SHUTDOWN_VALID (1 << 5)
+#define ND_SMART_VENDOR_VALID (1 << 6)
+#define ND_SMART_TEMP_TRIP (1 << 0)
+#define ND_SMART_SPARE_TRIP (1 << 1)
+#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0)
+#define ND_SMART_CRITICAL_HEALTH (1 << 1)
+#define ND_SMART_FATAL_HEALTH (1 << 2)
+
+struct nd_smart_payload {
+ __u32 flags;
+ __u8 reserved0[4];
+ __u8 health;
+ __u16 temperature;
+ __u8 spares;
+ __u8 alarm_flags;
+ __u8 life_used;
+ __u8 shutdown_state;
+ __u8 reserved1;
+ __u32 vendor_size;
+ __u8 vendor_data[108];
+} __packed;
+
struct nd_cmd_smart_threshold {
__u32 status;
__u8 data[8];
} __packed;
+struct nd_smart_threshold_payload {
+ __u16 alarm_control;
+ __u16 temperature;
+ __u8 spares;
+ __u8 reserved[3];
+} __packed;
+
struct nd_cmd_dimm_flags {
__u32 status;
__u32 flags;
@@ -125,6 +159,7 @@ enum {
ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7,
ND_CMD_VENDOR_EFFECT_LOG = 8,
ND_CMD_VENDOR = 9,
+ ND_CMD_CALL = 10,
};
enum {
@@ -158,6 +193,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
[ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
[ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
[ND_CMD_VENDOR] = "vendor",
+ [ND_CMD_CALL] = "cmd_call",
};
if (cmd < ARRAY_SIZE(names) && names[cmd])
@@ -206,6 +242,7 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
#define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */
#define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */
#define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */
+#define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */
enum nd_driver_flags {
ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM,
@@ -214,6 +251,7 @@ enum nd_driver_flags {
ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO,
ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM,
ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK,
+ ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
enum {
@@ -224,4 +262,44 @@ enum ars_masks {
ARS_STATUS_MASK = 0x0000FFFF,
ARS_EXT_STATUS_SHIFT = 16,
};
+
+/*
+ * struct nd_cmd_pkg
+ *
+ * is a wrapper for a quasi pass-through interface for invoking firmware
+ * associated with nvdimms.
+ *
+ * INPUT PARAMETERS
+ *
+ * nd_family corresponds to the firmware (e.g. DSM) interface.
+ *
+ * nd_command is the function index advertised by the firmware.
+ *
+ * nd_size_in is the size of the input parameters being passed to firmware
+ *
+ * OUTPUT PARAMETERS
+ *
+ * nd_fw_size is the size of the data firmware wants to return for
+ * the call. If nd_fw_size is greater than size of nd_size_out, only
+ * the first nd_size_out bytes are returned.
+ */
+
+struct nd_cmd_pkg {
+ __u64 nd_family; /* family of commands */
+ __u64 nd_command;
+ __u32 nd_size_in; /* INPUT: size of input args */
+ __u32 nd_size_out; /* INPUT: size of payload */
+ __u32 nd_reserved2[9]; /* reserved must be zero */
+ __u32 nd_fw_size; /* OUTPUT: size fw wants to return */
+ unsigned char nd_payload[]; /* Contents of call */
+};
+
+/* These NVDIMM families represent pre-standardization command sets */
+#define NVDIMM_FAMILY_INTEL 0
+#define NVDIMM_FAMILY_HPE1 1
+#define NVDIMM_FAMILY_HPE2 2
+
+#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
+ struct nd_cmd_pkg)
+
#endif /* __NDCTL_H__ */
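A minimal userspace sketch of the ND_CMD_CALL passthrough described above: allocate a struct nd_cmd_pkg with room for the payload, fill in the family, function index and sizes, and issue ND_IOCTL_CALL on a DIMM device node. The device path and the use of NVDIMM_FAMILY_INTEL are illustrative assumptions, not part of the header.

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ndctl.h>

/* Send one firmware call and return the ioctl result (sketch only). */
int nd_call(const char *dimm_dev, __u64 family, __u64 func,
	    const void *in, __u32 in_len, __u32 out_len)
{
	struct nd_cmd_pkg *pkg;
	int fd, rc;

	pkg = calloc(1, sizeof(*pkg) + in_len + out_len);
	if (!pkg)
		return -1;
	pkg->nd_family = family;	/* e.g. NVDIMM_FAMILY_INTEL */
	pkg->nd_command = func;		/* function index advertised by firmware */
	pkg->nd_size_in = in_len;
	pkg->nd_size_out = out_len;
	if (in_len)
		memcpy(pkg->nd_payload, in, in_len);

	fd = open(dimm_dev, O_RDWR);	/* e.g. "/dev/nmem0" (assumed path) */
	rc = fd < 0 ? -1 : ioctl(fd, ND_IOCTL_CALL, pkg);
	/* On success, nd_fw_size tells how much data the firmware produced;
	 * at most out_len bytes of it follow the input args in nd_payload. */
	if (fd >= 0)
		close(fd);
	free(pkg);
	return rc;
}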
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 788655bfa0f3..bd99a8d80f36 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -128,6 +128,7 @@ enum {
NDTPA_LOCKTIME, /* u64, msecs */
NDTPA_QUEUE_LENBYTES, /* u32 */
NDTPA_MCAST_REPROBES, /* u32 */
+ NDTPA_PAD,
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
@@ -160,6 +161,7 @@ enum {
NDTA_PARMS, /* nested TLV NDTPA_* */
NDTA_STATS, /* struct ndt_stats, read-only */
NDTA_GC_INTERVAL, /* u64, msecs */
+ NDTA_PAD,
__NDTA_MAX
};
#define NDTA_MAX (__NDTA_MAX - 1)
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 6d1abea9746e..264e515de16f 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -31,6 +31,16 @@ enum {
SOF_TIMESTAMPING_LAST
};
+/*
+ * SO_TIMESTAMPING flags are either for recording a packet timestamp or for
+ * reporting the timestamp to user space.
+ * Recording flags can be set both via socket options and control messages.
+ */
+#define SOF_TIMESTAMPING_TX_RECORD_MASK (SOF_TIMESTAMPING_TX_HARDWARE | \
+ SOF_TIMESTAMPING_TX_SOFTWARE | \
+ SOF_TIMESTAMPING_TX_SCHED | \
+ SOF_TIMESTAMPING_TX_ACK)
+
/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
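For context, a minimal sketch of how the recording and reporting flags combine on a socket; the socket itself and error handling are assumed, only the flag names come from this header:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Ask for software TX timestamps: record at send time, report to userspace. */
int enable_tx_timestamps(int sock_fd)
{
	unsigned int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
			     SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(sock_fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

Per-packet overrides of the recording bits (the SOF_TIMESTAMPING_TX_RECORD_MASK subset) can also be passed as a SO_TIMESTAMPING control message on sendmsg(), which is what the new comment refers to.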
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 63b2e34f1b60..ebb5154976de 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -118,6 +118,7 @@ enum {
IPSET_ATTR_SKBMARK,
IPSET_ATTR_SKBPRIO,
IPSET_ATTR_SKBQUEUE,
+ IPSET_ATTR_PAD,
__IPSET_ATTR_ADT_MAX,
};
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index eeffde196f80..6a4dbe04f09e 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -3,6 +3,7 @@
#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
+#define NFT_SET_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
/**
@@ -182,6 +183,7 @@ enum nft_chain_attributes {
NFTA_CHAIN_USE,
NFTA_CHAIN_TYPE,
NFTA_CHAIN_COUNTERS,
+ NFTA_CHAIN_PAD,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -206,6 +208,7 @@ enum nft_rule_attributes {
NFTA_RULE_COMPAT,
NFTA_RULE_POSITION,
NFTA_RULE_USERDATA,
+ NFTA_RULE_PAD,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -308,6 +311,7 @@ enum nft_set_attributes {
NFTA_SET_TIMEOUT,
NFTA_SET_GC_INTERVAL,
NFTA_SET_USERDATA,
+ NFTA_SET_PAD,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -341,6 +345,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_EXPIRATION,
NFTA_SET_ELEM_USERDATA,
NFTA_SET_ELEM_EXPR,
+ NFTA_SET_ELEM_PAD,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -584,6 +589,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_SREG_DATA,
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
+ NFTA_DYNSET_PAD,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -806,6 +812,7 @@ enum nft_limit_attributes {
NFTA_LIMIT_BURST,
NFTA_LIMIT_TYPE,
NFTA_LIMIT_FLAGS,
+ NFTA_LIMIT_PAD,
__NFTA_LIMIT_MAX
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
@@ -820,6 +827,7 @@ enum nft_counter_attributes {
NFTA_COUNTER_UNSPEC,
NFTA_COUNTER_BYTES,
NFTA_COUNTER_PACKETS,
+ NFTA_COUNTER_PAD,
__NFTA_COUNTER_MAX
};
#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
@@ -1055,6 +1063,7 @@ enum nft_trace_attibutes {
NFTA_TRACE_MARK,
NFTA_TRACE_NFPROTO,
NFTA_TRACE_POLICY,
+ NFTA_TRACE_PAD,
__NFTA_TRACE_MAX
};
#define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index f3e34dbbf966..36047ec70f37 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -29,6 +29,7 @@ enum nfnl_acct_type {
NFACCT_FLAGS,
NFACCT_QUOTA,
NFACCT_FILTER,
+ NFACCT_PAD,
__NFACCT_MAX
};
#define NFACCT_MAX (__NFACCT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index c1a4e1441a25..9df789709abe 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -116,6 +116,7 @@ enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_STATE,
CTA_PROTOINFO_DCCP_ROLE,
CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+ CTA_PROTOINFO_DCCP_PAD,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
@@ -135,6 +136,7 @@ enum ctattr_counters {
CTA_COUNTERS_BYTES, /* 64bit counters */
CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */
CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */
+ CTA_COUNTERS_PAD,
__CTA_COUNTERS_MAX
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
@@ -143,6 +145,7 @@ enum ctattr_tstamp {
CTA_TIMESTAMP_UNSPEC,
CTA_TIMESTAMP_START,
CTA_TIMESTAMP_STOP,
+ CTA_TIMESTAMP_PAD,
__CTA_TIMESTAMP_MAX
};
#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index b67a853638ff..ae30841ff94e 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -30,6 +30,14 @@ struct nfqnl_msg_packet_timestamp {
__aligned_be64 usec;
};
+enum nfqnl_vlan_attr {
+ NFQA_VLAN_UNSPEC,
+ NFQA_VLAN_PROTO, /* __be16 skb vlan_proto */
+ NFQA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */
+ __NFQA_VLAN_MAX,
+};
+#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1)
+
enum nfqnl_attr_type {
NFQA_UNSPEC,
NFQA_PACKET_HDR,
@@ -50,6 +58,8 @@ enum nfqnl_attr_type {
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
+ NFQA_VLAN, /* nested attribute: packet vlan info */
+ NFQA_L2HDR, /* full L2 header */
__NFQA_MAX
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5a30a7563633..e23d78685a01 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -322,7 +322,9 @@
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
* %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- * probe requests at CCK rate or not.
+ * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to
+ * specify a BSSID to scan for; if not included, the wildcard BSSID will
+ * be used.
* @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
* NL80211_CMD_GET_SCAN and on the "scan" multicast group)
* @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
@@ -427,7 +429,11 @@
* @NL80211_CMD_ASSOCIATE: association request and notification; like
* NL80211_CMD_AUTHENTICATE but for Association and Reassociation
* (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
- * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The
+ * %NL80211_ATTR_PREV_BSSID attribute is used to specify whether the
+ * request is for the initial association to an ESS (that attribute not
+ * included) or for reassociation within the ESS (that attribute is
+ * included).
* @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
* NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
* MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
@@ -477,6 +483,9 @@
* set of BSSID,frequency parameters is used (i.e., either the enforcing
* %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
* %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ * %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
+ * the ESS in case the device is already associated and an association with
+ * a different BSS is desired.
* Background scan period can optionally be
* specified in %NL80211_ATTR_BG_SCAN_PERIOD,
* if not specified default background scan configuration
@@ -1285,8 +1294,11 @@ enum nl80211_commands {
* @NL80211_ATTR_RESP_IE: (Re)association response information elements as
* sent by peer, for ROAM and successful CONNECT events.
*
- * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE
- * commands to specify using a reassociate frame
+ * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT
+ * commands to specify a request to reassociate within an ESS, i.e., to use
+ * Reassociate Request frame (with the value of this attribute in the
+ * Current AP address field) instead of Association Request frame which is
+ * used for the initial association to an ESS.
*
* @NL80211_ATTR_KEY: key information in a nested attribute with
* %NL80211_KEY_* sub-attributes
@@ -1795,6 +1807,17 @@ enum nl80211_commands {
* in a PBSS. Specified in %NL80211_CMD_CONNECT to request
* connecting to a PCP, and in %NL80211_CMD_START_AP to start
* a PCP instead of AP. Relevant for DMG networks only.
+ * @NL80211_ATTR_BSS_SELECT: nested attribute for drivers supporting the
+ * BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains
+ * attributes according to &enum nl80211_bss_select_attr to indicate what
+ * BSS selection behaviours are supported. When used with %NL80211_CMD_CONNECT
+ * it contains the behaviour-specific attribute containing the parameters for
+ * BSS selection to be done by the driver and/or firmware.
+ *
+ * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether the P2P PS mechanism is supported
+ * or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status
+ *
+ * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2172,6 +2195,12 @@ enum nl80211_attrs {
NL80211_ATTR_PBSS,
+ NL80211_ATTR_BSS_SELECT,
+
+ NL80211_ATTR_STA_SUPPORT_P2P_PS,
+
+ NL80211_ATTR_PAD,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2315,6 +2344,20 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1
};
+/**
+ * enum nl80211_sta_p2p_ps_status - station support of P2P PS
+ *
+ * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism
+ * @NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism
+ * @NUM_NL80211_P2P_PS_STATUS: number of values
+ */
+enum nl80211_sta_p2p_ps_status {
+ NL80211_P2P_PS_UNSUPPORTED = 0,
+ NL80211_P2P_PS_SUPPORTED,
+
+ NUM_NL80211_P2P_PS_STATUS,
+};
+
#define NL80211_STA_FLAG_MAX_OLD_API NL80211_STA_FLAG_TDLS_PEER
/**
@@ -2472,6 +2515,9 @@ enum nl80211_sta_bss_param {
* TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames;
* each one of those is again nested with &enum nl80211_tid_stats
* attributes carrying the actual values.
+ * @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames
+ * received from the station (u64, usec)
+ * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -2508,6 +2554,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_BEACON_RX,
NL80211_STA_INFO_BEACON_SIGNAL_AVG,
NL80211_STA_INFO_TID_STATS,
+ NL80211_STA_INFO_RX_DURATION,
+ NL80211_STA_INFO_PAD,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -2524,6 +2572,7 @@ enum nl80211_sta_info {
* transmitted MSDUs (not counting the first attempt; u64)
* @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
* MSDUs (u64)
+ * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment
* @NUM_NL80211_TID_STATS: number of attributes here
* @NL80211_TID_STATS_MAX: highest numbered attribute here
*/
@@ -2533,6 +2582,7 @@ enum nl80211_tid_stats {
NL80211_TID_STATS_TX_MSDU,
NL80211_TID_STATS_TX_MSDU_RETRIES,
NL80211_TID_STATS_TX_MSDU_FAILED,
+ NL80211_TID_STATS_PAD,
/* keep last */
NUM_NL80211_TID_STATS,
@@ -2969,6 +3019,7 @@ enum nl80211_user_reg_hint_type {
* transmitting data (on channel or globally)
* @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan
* (on this channel or globally)
+ * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
@@ -2984,6 +3035,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_RX,
NL80211_SURVEY_INFO_TIME_TX,
NL80211_SURVEY_INFO_TIME_SCAN,
+ NL80211_SURVEY_INFO_PAD,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -3409,6 +3461,7 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry
* was last updated by a received frame. The value is expected to be
* accurate to about 10ms. (u64, nanoseconds)
+ * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3429,6 +3482,7 @@ enum nl80211_bss {
NL80211_BSS_BEACON_TSF,
NL80211_BSS_PRESP_DATA,
NL80211_BSS_LAST_SEEN_BOOTTIME,
+ NL80211_BSS_PAD,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -3614,11 +3668,15 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
+ * since newer kernel versions may support more bands
*/
enum nl80211_band {
NL80211_BAND_2GHZ,
NL80211_BAND_5GHZ,
NL80211_BAND_60GHZ,
+
+ NUM_NL80211_BANDS,
};
/**
@@ -4665,4 +4723,48 @@ enum nl80211_sched_scan_plan {
__NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
};
+/**
+ * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
+ *
+ * @band: band of BSS that must match for RSSI value adjustment.
+ * @delta: value used to adjust the RSSI value of matching BSS.
+ */
+struct nl80211_bss_select_rssi_adjust {
+ __u8 band;
+ __s8 delta;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_bss_select_attr - attributes for bss selection.
+ *
+ * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved.
+ * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection
+ * is requested.
+ * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS
+ * selection should be done such that the specified band is preferred.
+ * When there are multiple BSS-es in the preferred band, the driver
+ * shall use RSSI-based BSS selection as a second step. The value of
+ * this attribute is according to &enum nl80211_band (u32).
+ * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for
+ * BSS-es in the specified band is to be adjusted before doing
+ * RSSI-based BSS selection. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number.
+ * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use.
+ *
+ * One and only one of these attributes is found within %NL80211_ATTR_BSS_SELECT
+ * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour
+ * which the driver shall use.
+ */
+enum nl80211_bss_select_attr {
+ __NL80211_BSS_SELECT_ATTR_INVALID,
+ NL80211_BSS_SELECT_ATTR_RSSI,
+ NL80211_BSS_SELECT_ATTR_BAND_PREF,
+ NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
+
+ /* keep last */
+ __NL80211_BSS_SELECT_ATTR_AFTER_LAST,
+ NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
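A small sketch of the payload carried by NL80211_BSS_SELECT_ATTR_RSSI_ADJUST; the band and delta values are made up, and the nl80211/netlink message assembly around it is omitted:

#include <linux/nl80211.h>

/* Prefer 2.4 GHz BSSes by treating their RSSI as 10 dB better than reported. */
static const struct nl80211_bss_select_rssi_adjust rssi_adjust = {
	.band  = NL80211_BAND_2GHZ,
	.delta = 10,
};
/* These two packed bytes become the value of NL80211_BSS_SELECT_ATTR_RSSI_ADJUST
 * inside the nested NL80211_ATTR_BSS_SELECT attribute of NL80211_CMD_CONNECT. */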
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index c4b2a3f90829..50ff21f748b6 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -61,5 +61,6 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
#define NVME_IOCTL_RESET _IO('N', 0x44)
#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
+#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
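The new ioctl takes no argument; a minimal sketch of triggering a namespace rescan on a controller character device (the device path is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int nvme_rescan(const char *ctrl_dev)	/* e.g. "/dev/nvme0" */
{
	int fd = open(ctrl_dev, O_RDWR);
	int rc = fd < 0 ? -1 : ioctl(fd, NVME_IOCTL_RESCAN);

	if (fd >= 0)
		close(fd);
	return rc;
}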
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 616d04761730..bb0d515b7654 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -84,6 +84,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
+ OVS_DP_ATTR_PAD,
__OVS_DP_ATTR_MAX
};
@@ -253,6 +254,7 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
/* receiving upcalls */
OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */
+ OVS_VPORT_ATTR_PAD,
__OVS_VPORT_ATTR_MAX
};
@@ -351,6 +353,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */
OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
+ OVS_TUNNEL_KEY_ATTR_PAD,
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -518,6 +521,7 @@ enum ovs_flow_attr {
* logging should be suppressed. */
OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
+ OVS_FLOW_ATTR_PAD,
__OVS_FLOW_ATTR_MAX
};
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 1becea86c73c..404095124ae2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -670,7 +670,8 @@
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
+#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DPC
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -946,4 +947,21 @@
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+/* Downstream Port Containment */
+#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
+#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
+#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
+#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
+
+#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */
+#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */
+
+#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */
+#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */
+
+#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+
#endif /* LINUX_PCI_REGS_H */
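The new constants are offsets relative to the DPC extended capability, so a user still has to locate the capability first. A sketch, assuming the capability base offset has already been found (for example by walking the extended capability list) and that config space is read through sysfs:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/pci_regs.h>

/* Return 1 if DPC has triggered on the port, 0 if not, -1 on error. */
int dpc_triggered(const char *config_path, off_t dpc_base)
{
	uint16_t status;	/* config space is little-endian; assumes an LE host */
	int fd = open(config_path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &status, sizeof(status),
		  dpc_base + PCI_EXP_DPC_STATUS) != sizeof(status)) {
		close(fd);
		return -1;
	}
	close(fd);
	return !!(status & PCI_EXP_DPC_STATUS_TRIGGER);
}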
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 1afe9623c1a7..36ce552cf6a9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -340,7 +340,8 @@ struct perf_event_attr {
comm_exec : 1, /* flag comm events that are due to an exec */
use_clockid : 1, /* use @clockid for time fields */
context_switch : 1, /* context switch data */
- __reserved_1 : 37;
+ write_backward : 1, /* Write ring buffer from end to beginning */
+ __reserved_1 : 36;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -401,6 +402,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -860,6 +862,7 @@ enum perf_event_type {
};
#define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
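A sketch of how the two perf additions pair up in practice: write_backward selects a ring buffer that is filled from the end, and PERF_EVENT_IOC_PAUSE_OUTPUT stops the kernel from overwriting it while a snapshot is read. The event type and sample layout below are arbitrary choices for illustration.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

int open_backward_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.write_backward = 1;	/* newest records end up at the tail */

	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

/* Before walking the mmap'ed buffer, freeze it:
 *	ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, 1);
 * and pass 0 to resume output afterwards. */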
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index c43c5f78b9c4..f4297c8a42fe 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -66,6 +66,7 @@ enum {
TCA_ACT_OPTIONS,
TCA_ACT_INDEX,
TCA_ACT_STATS,
+ TCA_ACT_PAD,
__TCA_ACT_MAX
};
@@ -144,12 +145,18 @@ enum {
TCA_POLICE_PEAKRATE,
TCA_POLICE_AVRATE,
TCA_POLICE_RESULT,
+ TCA_POLICE_TM,
+ TCA_POLICE_PAD,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW (1 << 0)
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+
/* U32 filters */
#define TC_U32_HTID(h) ((h)&0xFFF00000)
@@ -168,11 +175,12 @@ enum {
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
- TCA_U32_ACT,
+ TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
TCA_U32_MARK,
TCA_U32_FLAGS,
+ TCA_U32_PAD,
__TCA_U32_MAX
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8cb18b44968e..2382eed50278 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -179,6 +179,7 @@ enum {
TCA_TBF_PRATE64,
TCA_TBF_BURST,
TCA_TBF_PBURST,
+ TCA_TBF_PAD,
__TCA_TBF_MAX,
};
@@ -368,6 +369,7 @@ enum {
TCA_HTB_DIRECT_QLEN,
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
+ TCA_HTB_PAD,
__TCA_HTB_MAX,
};
@@ -531,6 +533,7 @@ enum {
TCA_NETEM_RATE,
TCA_NETEM_ECN,
TCA_NETEM_RATE64,
+ TCA_NETEM_PAD,
__TCA_NETEM_MAX,
};
@@ -715,6 +718,8 @@ enum {
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
TCA_FQ_CODEL_CE_THRESHOLD,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
+ TCA_FQ_CODEL_MEMORY_LIMIT,
__TCA_FQ_CODEL_MAX
};
@@ -739,6 +744,8 @@ struct tc_fq_codel_qd_stats {
__u32 new_flows_len; /* count of flows in new list */
__u32 old_flows_len; /* count of flows in old list */
__u32 ce_mark; /* packets above ce_threshold */
+ __u32 memory_usage; /* in bytes */
+ __u32 drop_overmemory;
};
struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h
new file mode 100644
index 000000000000..66c0748d26e2
--- /dev/null
+++ b/include/uapi/linux/qrtr.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_QRTR_H
+#define _LINUX_QRTR_H
+
+#include <linux/socket.h>
+
+struct sockaddr_qrtr {
+ __kernel_sa_family_t sq_family;
+ __u32 sq_node;
+ __u32 sq_port;
+};
+
+#endif /* _LINUX_QRTR_H */
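A sketch of binding an IPC Router socket with the new address structure; the AF_QIPCRTR family value and the node/port numbers are assumptions, not part of this header.

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/qrtr.h>

#ifndef AF_QIPCRTR
#define AF_QIPCRTR 42		/* assumed value; not defined in this header */
#endif

int qrtr_open(unsigned int node, unsigned int port)
{
	struct sockaddr_qrtr sq;
	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&sq, 0, sizeof(sq));
	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = node;
	sq.sq_port = port;
	if (bind(fd, (struct sockaddr *)&sq, sizeof(sq)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}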
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 38baddb807f5..4d2489ef6f10 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -191,6 +191,7 @@ enum {
QUOTA_NL_A_DEV_MAJOR,
QUOTA_NL_A_DEV_MINOR,
QUOTA_NL_A_CAUSED_ID,
+ QUOTA_NL_A_PAD,
__QUOTA_NL_A_MAX,
};
#define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1)
diff --git a/include/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
index b65d19df76d2..5796bf1d06ad 100644
--- a/include/linux/rio_mport_cdev.h
+++ b/include/uapi/linux/rio_mport_cdev.h
@@ -39,16 +39,16 @@
#ifndef _RIO_MPORT_CDEV_H_
#define _RIO_MPORT_CDEV_H_
-#ifndef __user
-#define __user
-#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
struct rio_mport_maint_io {
- uint32_t rioid; /* destID of remote device */
- uint32_t hopcount; /* hopcount to remote device */
- uint32_t offset; /* offset in register space */
- size_t length; /* length in bytes */
- void __user *buffer; /* data buffer */
+ __u16 rioid; /* destID of remote device */
+ __u8 hopcount; /* hopcount to remote device */
+ __u8 pad0[5];
+ __u32 offset; /* offset in register space */
+ __u32 length; /* length in bytes */
+ __u64 buffer; /* pointer to data buffer */
};
/*
@@ -66,22 +66,23 @@ struct rio_mport_maint_io {
#define RIO_CAP_MAP_INB (1 << 7)
struct rio_mport_properties {
- uint16_t hdid;
- uint8_t id; /* Physical port ID */
- uint8_t index;
- uint32_t flags;
- uint32_t sys_size; /* Default addressing size */
- uint8_t port_ok;
- uint8_t link_speed;
- uint8_t link_width;
- uint32_t dma_max_sge;
- uint32_t dma_max_size;
- uint32_t dma_align;
- uint32_t transfer_mode; /* Default transfer mode */
- uint32_t cap_sys_size; /* Capable system sizes */
- uint32_t cap_addr_size; /* Capable addressing sizes */
- uint32_t cap_transfer_mode; /* Capable transfer modes */
- uint32_t cap_mport; /* Mport capabilities */
+ __u16 hdid;
+ __u8 id; /* Physical port ID */
+ __u8 index;
+ __u32 flags;
+ __u32 sys_size; /* Default addressing size */
+ __u8 port_ok;
+ __u8 link_speed;
+ __u8 link_width;
+ __u8 pad0;
+ __u32 dma_max_sge;
+ __u32 dma_max_size;
+ __u32 dma_align;
+ __u32 transfer_mode; /* Default transfer mode */
+ __u32 cap_sys_size; /* Capable system sizes */
+ __u32 cap_addr_size; /* Capable addressing sizes */
+ __u32 cap_transfer_mode; /* Capable transfer modes */
+ __u32 cap_mport; /* Mport capabilities */
};
/*
@@ -93,54 +94,57 @@ struct rio_mport_properties {
#define RIO_PORTWRITE (1 << 1)
struct rio_doorbell {
- uint32_t rioid;
- uint16_t payload;
+ __u16 rioid;
+ __u16 payload;
};
struct rio_doorbell_filter {
- uint32_t rioid; /* 0xffffffff to match all ids */
- uint16_t low;
- uint16_t high;
+ __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */
+ __u16 low;
+ __u16 high;
+ __u16 pad0;
};
struct rio_portwrite {
- uint32_t payload[16];
+ __u32 payload[16];
};
struct rio_pw_filter {
- uint32_t mask;
- uint32_t low;
- uint32_t high;
+ __u32 mask;
+ __u32 low;
+ __u32 high;
+ __u32 pad0;
};
/* RapidIO base address for inbound requests set to value defined below
* indicates that no specific RIO-to-local address translation is requested
* and driver should use direct (one-to-one) address mapping.
*/
-#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0))
+#define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0))
struct rio_mmap {
- uint32_t rioid;
- uint64_t rio_addr;
- uint64_t length;
- uint64_t handle;
- void *address;
+ __u16 rioid;
+ __u16 pad0[3];
+ __u64 rio_addr;
+ __u64 length;
+ __u64 handle;
+ __u64 address;
};
struct rio_dma_mem {
- uint64_t length; /* length of DMA memory */
- uint64_t dma_handle; /* handle associated with this memory */
- void *buffer; /* pointer to this memory */
+ __u64 length; /* length of DMA memory */
+ __u64 dma_handle; /* handle associated with this memory */
+ __u64 address;
};
-
struct rio_event {
- unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
+ __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
union {
struct rio_doorbell doorbell; /* header for RIO_DOORBELL */
struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
} u;
+ __u32 pad0;
};
enum rio_transfer_sync {
@@ -184,35 +188,37 @@ enum rio_exchange {
};
struct rio_transfer_io {
- uint32_t rioid; /* Target destID */
- uint64_t rio_addr; /* Address in target's RIO mem space */
- enum rio_exchange method; /* Data exchange method */
- void __user *loc_addr;
- uint64_t handle;
- uint64_t offset; /* Offset in buffer */
- uint64_t length; /* Length in bytes */
- uint32_t completion_code; /* Completion code for this transfer */
+ __u64 rio_addr; /* Address in target's RIO mem space */
+ __u64 loc_addr;
+ __u64 handle;
+ __u64 offset; /* Offset in buffer */
+ __u64 length; /* Length in bytes */
+ __u16 rioid; /* Target destID */
+ __u16 method; /* Data exchange method, one of rio_exchange enum */
+ __u32 completion_code; /* Completion code for this transfer */
};
struct rio_transaction {
- uint32_t transfer_mode; /* Data transfer mode */
- enum rio_transfer_sync sync; /* Synchronization method */
- enum rio_transfer_dir dir; /* Transfer direction */
- size_t count; /* Number of transfers */
- struct rio_transfer_io __user *block; /* Array of <count> transfers */
+ __u64 block; /* Pointer to array of <count> transfers */
+ __u32 count; /* Number of transfers */
+ __u32 transfer_mode; /* Data transfer mode */
+ __u16 sync; /* Synch method, one of rio_transfer_sync enum */
+ __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */
+ __u32 pad0;
};
struct rio_async_tx_wait {
- uint32_t token; /* DMA transaction ID token */
- uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */
+ __u32 token; /* DMA transaction ID token */
+ __u32 timeout; /* Wait timeout in msec, if 0 use default TO */
};
#define RIO_MAX_DEVNAME_SZ 20
struct rio_rdev_info {
- uint32_t destid;
- uint8_t hopcount;
- uint32_t comptag;
+ __u16 destid;
+ __u8 hopcount;
+ __u8 pad0;
+ __u32 comptag;
char name[RIO_MAX_DEVNAME_SZ + 1];
};
@@ -220,11 +226,11 @@ struct rio_rdev_info {
#define RIO_MPORT_DRV_MAGIC 'm'
#define RIO_MPORT_MAINT_HDID_SET \
- _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
#define RIO_MPORT_MAINT_COMPTAG_SET \
- _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
#define RIO_MPORT_MAINT_PORT_IDX_GET \
- _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
+ _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
#define RIO_MPORT_GET_PROPERTIES \
_IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
#define RIO_MPORT_MAINT_READ_LOCAL \
@@ -244,9 +250,9 @@ struct rio_rdev_info {
#define RIO_DISABLE_PORTWRITE_RANGE \
_IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
#define RIO_SET_EVENT_MASK \
- _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
+ _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
#define RIO_GET_EVENT_MASK \
- _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
+ _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
#define RIO_MAP_OUTBOUND \
_IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
#define RIO_UNMAP_OUTBOUND \
@@ -254,11 +260,11 @@ struct rio_rdev_info {
#define RIO_MAP_INBOUND \
_IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
#define RIO_UNMAP_INBOUND \
- _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
#define RIO_ALLOC_DMA \
_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
#define RIO_FREE_DMA \
- _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
#define RIO_TRANSFER \
_IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
#define RIO_WAIT_FOR_ASYNC \
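One practical consequence of the fixed-width conversion above is that userspace now stores buffer pointers in __u64 fields with an explicit cast. A sketch of a single NWRITE transfer through RIO_TRANSFER, assuming the enum value names (RIO_EXCHANGE_NWRITE, RIO_TRANSFER_SYNC, RIO_TRANSFER_DIR_WRITE) from the rest of the header and an already-open mport device fd:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>

int rio_write_block(int mport_fd, void *buf, __u64 len,
		    __u16 destid, __u64 rio_addr)
{
	struct rio_transfer_io xfer;
	struct rio_transaction trans;

	memset(&xfer, 0, sizeof(xfer));
	xfer.rioid = destid;
	xfer.rio_addr = rio_addr;
	xfer.loc_addr = (uintptr_t)buf;	/* pointer travels as a __u64 now */
	xfer.length = len;
	xfer.method = RIO_EXCHANGE_NWRITE;

	memset(&trans, 0, sizeof(trans));
	trans.block = (uintptr_t)&xfer;	/* array of one transfer descriptor */
	trans.count = 1;
	trans.sync = RIO_TRANSFER_SYNC;
	trans.dir = RIO_TRANSFER_DIR_WRITE;

	return ioctl(mport_fd, RIO_TRANSFER, &trans);
}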
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index ca764b5da86d..262f0379d83a 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -139,6 +139,11 @@ enum {
RTM_GETNSID = 90,
#define RTM_GETNSID RTM_GETNSID
+ RTM_NEWSTATS = 92,
+#define RTM_NEWSTATS RTM_NEWSTATS
+ RTM_GETSTATS = 94,
+#define RTM_GETSTATS RTM_GETSTATS
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -312,6 +317,7 @@ enum rtattr_type_t {
RTA_ENCAP_TYPE,
RTA_ENCAP,
RTA_EXPIRES,
+ RTA_PAD,
__RTA_MAX
};
@@ -536,6 +542,7 @@ enum {
TCA_FCNT,
TCA_STATS2,
TCA_STAB,
+ TCA_PAD,
__TCA_MAX
};
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index e513a4ee369b..99dbed8a8874 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -264,4 +264,10 @@
/* MVEBU UART */
#define PORT_MVEBU 114
+/* Microchip PIC32 UART */
+#define PORT_PIC32 115
+
+/* MPS2 UART */
+#define PORT_MPS2UART 116
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/signal.h b/include/uapi/linux/signal.h
index e1bd50c29ded..cd0804b6bfa2 100644
--- a/include/uapi/linux/signal.h
+++ b/include/uapi/linux/signal.h
@@ -7,4 +7,9 @@
#define SS_ONSTACK 1
#define SS_DISABLE 2
+/* bit-flags */
+#define SS_AUTODISARM (1U << 31) /* disable the alternate signal stack during signal handling */
+/* mask for all SS_xxx flags */
+#define SS_FLAG_BITS SS_AUTODISARM
+
#endif /* _UAPI_LINUX_SIGNAL_H */
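A minimal sketch of the intended use: OR the new flag into ss_flags when installing an alternate signal stack, so the kernel disarms the stack while a handler is running on it. The stack size here is an arbitrary choice.

#include <signal.h>

#ifndef SS_AUTODISARM
#define SS_AUTODISARM (1U << 31)	/* from the hunk above */
#endif

static char alt_stack[64 * 1024];

int install_autodisarm_stack(void)
{
	stack_t ss = {
		.ss_sp = alt_stack,
		.ss_size = sizeof(alt_stack),
		.ss_flags = SS_AUTODISARM,	/* cleared on handler entry, restored on return */
	};

	return sigaltstack(&ss, NULL);
}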
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index bae2d80034d4..7ff505d8a47b 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -20,6 +20,7 @@ enum {
SK_MEMINFO_WMEM_QUEUED,
SK_MEMINFO_OPTMEM,
SK_MEMINFO_BACKLOG,
+ SK_MEMINFO_DROPS,
SK_MEMINFO_VARS,
};
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 3f10e5317b46..8f3a8f606fd9 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,9 +45,7 @@
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP16__
- return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP32__
- return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP64__
- return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
+#endif
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
+#endif
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
+#endif
/**
* __swahw32 - return a word-swapped 32-bit value
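The effect of moving the __builtin_bswap* hooks from the out-of-line __fswab* helpers up into the __swab* macros is that a constant argument can now be folded at compile time while a variable still goes through the builtin. A small sketch of the observable behavior:

#include <stdio.h>
#include <linux/swab.h>

int main(void)
{
	unsigned int v = 0x12345678;

	printf("%#x\n", __swab32(0x11223344));	/* constant: 0x44332211 */
	printf("%#x\n", __swab32(v));		/* variable: 0x78563412 */
	return 0;
}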
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
new file mode 100644
index 000000000000..413303d37b56
--- /dev/null
+++ b/include/uapi/linux/sync_file.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @name: name of new fence
+ * @fd2: file descriptor of second fence
+ * @fence: returns the fd of the new fence to userspace
+ * @flags: merge_data flags
+ * @pad: padding for 64-bit alignment, should always be zero
+ */
+struct sync_merge_data {
+ char name[32];
+ __s32 fd2;
+ __s32 fence;
+ __u32 flags;
+ __u32 pad;
+};
+
+/**
+ * struct sync_fence_info - detailed fence information
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the fence 0:active 1:signaled <0:error
+ * @flags: fence_info flags
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ */
+struct sync_fence_info {
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u32 flags;
+ __u64 timestamp_ns;
+};
+
+/**
+ * struct sync_file_info - data returned from fence info ioctl
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @flags: sync_file_info flags
+ * @num_fences: number of fences in the sync_file
+ * @pad: padding for 64-bit alignment, should always be zero
+ * @sync_fence_info: pointer to array of structs sync_fence_info with all
+ * fences in the sync_file
+ */
+struct sync_file_info {
+ char name[32];
+ __s32 status;
+ __u32 flags;
+ __u32 num_fences;
+ __u32 pad;
+
+ __u64 sync_fence_info;
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * Opcodes 0, 1 and 2 were burned during an API change to keep users of the
+ * old API from getting weird errors when trying to handle sync_files. The API
+ * change happened during the de-stage of the Sync Framework when there were
+ * no upstream users available.
+ */
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_file_info. num_fences and sync_fence_info should
+ * describe a userspace buffer of struct sync_fence_info entries (or be zero
+ * to only query the number of fences). On return, status and num_fences are
+ * updated and the buffer holds the detailed information for each fence in
+ * the sync_file.
+ */
+#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+
+#endif /* _UAPI_LINUX_SYNC_H */
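A sketch of the merge path described in the DOC comment above: the first fence fd is the ioctl target, the second goes into fd2, and the new fence fd comes back in the fence field. Where the two fds come from (a DRM driver, typically) is outside the scope of this header.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

int sync_merge(int fd1, int fd2, const char *name)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));	/* flags and pad must be zero */
	strncpy(data.name, name, sizeof(data.name) - 1);
	data.fd2 = fd2;

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;
	return data.fence;	/* fd of the merged fence */
}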
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 242cf0c6e33d..e3969bd939e4 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -10,3 +10,4 @@ header-y += tc_skbedit.h
header-y += tc_vlan.h
header-y += tc_bpf.h
header-y += tc_connmark.h
+header-y += tc_ife.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 07f17cc70bb3..063d9d465119 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -26,6 +26,7 @@ enum {
TCA_ACT_BPF_OPS,
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
+ TCA_ACT_BPF_PAD,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
index 994b0971bce2..62a5e944c554 100644
--- a/include/uapi/linux/tc_act/tc_connmark.h
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -15,6 +15,7 @@ enum {
TCA_CONNMARK_UNSPEC,
TCA_CONNMARK_PARMS,
TCA_CONNMARK_TM,
+ TCA_CONNMARK_PAD,
__TCA_CONNMARK_MAX
};
#define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h
index a047c49a3153..8ac8041ab5f1 100644
--- a/include/uapi/linux/tc_act/tc_csum.h
+++ b/include/uapi/linux/tc_act/tc_csum.h
@@ -10,6 +10,7 @@ enum {
TCA_CSUM_UNSPEC,
TCA_CSUM_PARMS,
TCA_CSUM_TM,
+ TCA_CSUM_PAD,
__TCA_CSUM_MAX
};
#define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
index 17dddb40f740..d2a3abb77aeb 100644
--- a/include/uapi/linux/tc_act/tc_defact.h
+++ b/include/uapi/linux/tc_act/tc_defact.h
@@ -12,6 +12,7 @@ enum {
TCA_DEF_TM,
TCA_DEF_PARMS,
TCA_DEF_DATA,
+ TCA_DEF_PAD,
__TCA_DEF_MAX
};
#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_gact.h b/include/uapi/linux/tc_act/tc_gact.h
index f7bf94eed510..70b536a8f8b2 100644
--- a/include/uapi/linux/tc_act/tc_gact.h
+++ b/include/uapi/linux/tc_act/tc_gact.h
@@ -25,6 +25,7 @@ enum {
TCA_GACT_TM,
TCA_GACT_PARMS,
TCA_GACT_PROB,
+ TCA_GACT_PAD,
__TCA_GACT_MAX
};
#define TCA_GACT_MAX (__TCA_GACT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index d648ff66586f..4ece02a77b9a 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -23,6 +23,7 @@ enum {
TCA_IFE_SMAC,
TCA_IFE_TYPE,
TCA_IFE_METALST,
+ TCA_IFE_PAD,
__TCA_IFE_MAX
};
#define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
index 130aaadf6fac..7c6e155dd981 100644
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ b/include/uapi/linux/tc_act/tc_ipt.h
@@ -14,6 +14,7 @@ enum {
TCA_IPT_CNT,
TCA_IPT_TM,
TCA_IPT_TARG,
+ TCA_IPT_PAD,
__TCA_IPT_MAX
};
#define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 7561750e8fd6..3d7a2b352a62 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -20,6 +20,7 @@ enum {
TCA_MIRRED_UNSPEC,
TCA_MIRRED_TM,
TCA_MIRRED_PARMS,
+ TCA_MIRRED_PAD,
__TCA_MIRRED_MAX
};
#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_nat.h b/include/uapi/linux/tc_act/tc_nat.h
index 6663aeba0b9a..923457c9ebf0 100644
--- a/include/uapi/linux/tc_act/tc_nat.h
+++ b/include/uapi/linux/tc_act/tc_nat.h
@@ -10,6 +10,7 @@ enum {
TCA_NAT_UNSPEC,
TCA_NAT_PARMS,
TCA_NAT_TM,
+ TCA_NAT_PAD,
__TCA_NAT_MAX
};
#define TCA_NAT_MAX (__TCA_NAT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 716cfabcd5b2..6389959a5157 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -10,6 +10,7 @@ enum {
TCA_PEDIT_UNSPEC,
TCA_PEDIT_TM,
TCA_PEDIT_PARMS,
+ TCA_PEDIT_PAD,
__TCA_PEDIT_MAX
};
#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 7a2e910a5f08..fecb5cc48c40 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -39,6 +39,7 @@ enum {
TCA_SKBEDIT_PRIORITY,
TCA_SKBEDIT_QUEUE_MAPPING,
TCA_SKBEDIT_MARK,
+ TCA_SKBEDIT_PAD,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index f7b8d448b960..31151ff6264f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -28,6 +28,7 @@ enum {
TCA_VLAN_PARMS,
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ TCA_VLAN_PAD,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h
index 93533926035c..80ad90d0cfc2 100644
--- a/include/uapi/linux/tcp_metrics.h
+++ b/include/uapi/linux/tcp_metrics.h
@@ -40,6 +40,7 @@ enum {
TCP_METRICS_ATTR_FOPEN_COOKIE, /* binary */
TCP_METRICS_ATTR_SADDR_IPV4, /* u32 */
TCP_METRICS_ATTR_SADDR_IPV6, /* binary */
+ TCP_METRICS_ATTR_PAD,
__TCP_METRICS_ATTR_MAX,
};
diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
index 072e41e45ee2..66e4d8bcb16f 100644
--- a/include/uapi/linux/tty_flags.h
+++ b/include/uapi/linux/tty_flags.h
@@ -32,7 +32,13 @@
#define ASYNCB_MAGIC_MULTIPLIER 16 /* Use special CLK or divisor */
#define ASYNCB_LAST_USER 16
-/* Internal flags used only by kernel */
+/*
+ * Internal flags used only by kernel (read-only)
+ *
+ * WARNING: These flags are no longer used and have been superseded by the
+ * TTY_PORT_ flags in the iflags field (which are not userspace-visible)
+ */
+#ifndef _KERNEL_
#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
#define ASYNCB_SUSPENDED 30 /* Serial port is suspended */
#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
@@ -43,7 +49,9 @@
#define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */
#define ASYNCB_CONS_FLOW 23 /* flow control for console */
#define ASYNCB_FIRST_KERNEL 22
+#endif
+/* Masks */
#define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY)
#define ASYNC_SUSPENDED (1U << ASYNCB_SUSPENDED)
#define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT)
@@ -72,6 +80,8 @@
#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
+#ifndef _KERNEL_
+/* These flags are no longer used (and were always masked from userspace) */
#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
#define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF)
@@ -81,5 +91,6 @@
#define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ)
#define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW)
#define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1))
+#endif
#endif
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 16574ea18f0c..2c8180f9156f 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -36,6 +36,7 @@ struct udphdr {
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
-
+#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
+#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index d5ce71607972..a8acc24765fe 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -105,6 +105,13 @@
#define USB_REQ_LOOPBACK_DATA_READ 0x16
#define USB_REQ_SET_INTERFACE_DS 0x17
+/* specific requests for USB Power Delivery */
+#define USB_REQ_GET_PARTNER_PDO 20
+#define USB_REQ_GET_BATTERY_STATUS 21
+#define USB_REQ_SET_PDO 22
+#define USB_REQ_GET_VDM 23
+#define USB_REQ_SEND_VDM 24
+
/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
* used by hubs to put ports into a new L1 suspend state, except that it
* forgot to define its number ...
@@ -165,6 +172,22 @@
#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */
#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */
+/*
+ * Feature selectors from Table 9-8 USB Power Delivery spec
+ */
+#define USB_DEVICE_BATTERY_WAKE_MASK 40
+#define USB_DEVICE_OS_IS_PD_AWARE 41
+#define USB_DEVICE_POLICY_MODE 42
+#define USB_PORT_PR_SWAP 43
+#define USB_PORT_GOTO_MIN 44
+#define USB_PORT_RETURN_POWER 45
+#define USB_PORT_ACCEPT_PD_REQUEST 46
+#define USB_PORT_REJECT_PD_REQUEST 47
+#define USB_PORT_PORT_PD_RESET 48
+#define USB_PORT_C_PORT_PD_CHANGE 49
+#define USB_PORT_CABLE_PD_RESET 50
+#define USB_DEVICE_CHARGING_POLICY 54
+
/**
* struct usb_ctrlrequest - SETUP data for a USB device control request
* @bRequestType: matches the USB bmRequestType field
@@ -914,6 +937,104 @@ struct usb_ssp_cap_descriptor {
} __attribute__((packed));
/*
+ * USB Power Delivery Capability Descriptor:
+ * Defines capabilities for PD
+ */
+/* Defines the various PD Capabilities of this device */
+#define USB_PD_POWER_DELIVERY_CAPABILITY 0x06
+/* Provides information on each battery supported by the device */
+#define USB_PD_BATTERY_INFO_CAPABILITY 0x07
+/* The Consumer characteristics of a Port on the device */
+#define USB_PD_PD_CONSUMER_PORT_CAPABILITY 0x08
+/* The provider characteristics of a Port on the device */
+#define USB_PD_PD_PROVIDER_PORT_CAPABILITY 0x09
+
+struct usb_pd_cap_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType; /* set to USB_PD_POWER_DELIVERY_CAPABILITY */
+ __u8 bReserved;
+ __le32 bmAttributes;
+#define USB_PD_CAP_BATTERY_CHARGING (1 << 1) /* supports Battery Charging specification */
+#define USB_PD_CAP_USB_PD (1 << 2) /* supports USB Power Delivery specification */
+#define USB_PD_CAP_PROVIDER (1 << 3) /* can provide power */
+#define USB_PD_CAP_CONSUMER (1 << 4) /* can consume power */
+#define USB_PD_CAP_CHARGING_POLICY (1 << 5) /* supports CHARGING_POLICY feature */
+#define USB_PD_CAP_TYPE_C_CURRENT (1 << 6) /* supports power capabilities defined in the USB Type-C Specification */
+
+#define USB_PD_CAP_PWR_AC (1 << 8)
+#define USB_PD_CAP_PWR_BAT (1 << 9)
+#define USB_PD_CAP_PWR_USE_V_BUS (1 << 14)
+
+ __le16 bmProviderPorts; /* Bit zero refers to the UFP of the device */
+ __le16 bmConsumerPorts;
+ __le16 bcdBCVersion;
+ __le16 bcdPDVersion;
+ __le16 bcdUSBTypeCVersion;
+} __attribute__((packed));
+
+struct usb_pd_cap_battery_info_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ /* Index of string descriptor shall contain the user friendly name for this battery */
+ __u8 iBattery;
+ /* Index of string descriptor shall contain the Serial Number String for this battery */
+ __u8 iSerial;
+ __u8 iManufacturer;
+ __u8 bBatteryId; /* uniquely identifies this battery in status Messages */
+ __u8 bReserved;
+ /*
+ * Shall contain the Battery Charge value above which this
+ * battery is considered to be fully charged but not necessarily
+ * “topped off.”
+ */
+ __le32 dwChargedThreshold; /* in mWh */
+ /*
+ * Shall contain the minimum charge level of this battery such
+ * that above this threshold, a device can be assured of being
+ * able to power up successfully (see Battery Charging 1.2).
+ */
+ __le32 dwWeakThreshold; /* in mWh */
+ __le32 dwBatteryDesignCapacity; /* in mWh */
+ __le32 dwBatteryLastFullchargeCapacity; /* in mWh */
+} __attribute__((packed));
+
+struct usb_pd_cap_consumer_port_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved;
+ __u8 bmCapabilities;
+/* port will operate under: */
+#define USB_PD_CAP_CONSUMER_BC (1 << 0) /* BC */
+#define USB_PD_CAP_CONSUMER_PD (1 << 1) /* PD */
+#define USB_PD_CAP_CONSUMER_TYPE_C (1 << 2) /* USB Type-C Current */
+ __le16 wMinVoltage; /* in 50mV units */
+ __le16 wMaxVoltage; /* in 50mV units */
+ __u16 wReserved;
+ __le32 dwMaxOperatingPower; /* in 10 mW - operating at steady state */
+ __le32 dwMaxPeakPower; /* in 10mW units - operating at peak power */
+ __le32 dwMaxPeakPowerTime; /* in 100ms units - duration of peak */
+#define USB_PD_CAP_CONSUMER_UNKNOWN_PEAK_POWER_TIME 0xffff
+} __attribute__((packed));
+
+struct usb_pd_cap_provider_port_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u8 bReserved1;
+ __u8 bmCapabilities;
+/* port will operate under: */
+#define USB_PD_CAP_PROVIDER_BC (1 << 0) /* BC */
+#define USB_PD_CAP_PROVIDER_PD (1 << 1) /* PD */
+#define USB_PD_CAP_PROVIDER_TYPE_C (1 << 2) /* USB Type-C Current */
+ __u8 bNumOfPDObjects;
+ __u8 bReserved2;
+ __le32 wPowerDataObject[];
+} __attribute__((packed));
+
+/*
* Precision time measurement capability descriptor: advertised by devices and
* hubs that support PTM
*/
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index 786f0773cc33..3738e5fb6a4d 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _UAPI_LINUX_UUID_H_
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index e895975c5b0e..8f951917be74 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -138,10 +138,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
-#if 1
- /* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
-#endif
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
@@ -657,8 +654,7 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
-#if 1
- /* Experimental Frame Size and frame rate enumeration */
+ /* Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
*/
@@ -724,7 +720,6 @@ struct v4l2_frmivalenum {
__u32 reserved[2]; /* Reserved space for future use */
};
-#endif
/*
* T I M E C O D E
@@ -1728,8 +1723,6 @@ struct v4l2_audioout {
/*
* M P E G S E R V I C E S
- *
- * NOTE: EXPERIMENTAL API
*/
#if 1
#define V4L2_ENC_IDX_FRAME_I (0)
@@ -2259,46 +2252,35 @@ struct v4l2_create_buffers {
#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd)
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd)
-/* Experimental, meant for debugging, testing and internal use.
- Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
- You must be root to use these ioctls. Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
+ * You must be root to use these ioctls. Never use these in applications!
+ */
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
-
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
-
-/* Experimental, the below two ioctls may change over the next couple of kernel
- versions */
#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
-
-/* Experimental selection API */
#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection)
-
-/* Experimental, these two ioctls may change over the next couple of kernel
- versions. */
#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
-
-/* Experimental, these three ioctls may change over the next couple of kernel
- versions. */
#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
-
-/* Experimental, this ioctl may change over the next couple of kernel
- versions. */
#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band)
-/* Experimental, meant for debugging, testing and internal use.
- Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Never use this in applications!
+ */
#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info)
#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index 978578bd1895..f69034887e68 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -8,7 +8,6 @@
*/
#define MIN_NR_CONSOLES 1 /* must be at least 1 */
#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
-#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
/* Note: the ioctl VT_GETSTATE does not work for
consoles 16 and higher (since it returns a short) */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 2cd9e608d0d1..143338978b48 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -302,6 +302,7 @@ enum xfrm_attr_type_t {
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
+ XFRMA_PAD,
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 763bb6950402..0ec1da2ef652 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -228,7 +228,7 @@ struct nand_oobfree {
* complete set of ECC information. The ioctl truncates the larger internal
* structure to retain binary compatibility with the static declaration of the
* ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
- * the user struct, not the MAX size of the internal struct nand_ecclayout.
+ * the user struct, not the MAX size of the internal OOB layout representation.
*/
struct nand_ecclayout_user {
__u32 eccbytes;
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index a533cecab14f..98bebf8bef55 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
* The major version changes when data structures change in an incompatible
* way. The driver must be the same for initialization to succeed.
*/
-#define HFI1_USER_SWMAJOR 5
+#define HFI1_USER_SWMAJOR 6
/*
* Minor version differences are always compatible
@@ -75,7 +75,12 @@
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define HFI1_USER_SWMINOR 0
+#define HFI1_USER_SWMINOR 1
+
+/*
+ * We will encode the major/minor inside a single 32-bit version number.
+ */
+#define HFI1_SWMAJOR_SHIFT 16
/*
* Set of HW and driver capability/feature bits.
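HFI1_SWMAJOR_SHIFT lets the major and minor numbers share the single 32-bit word that user code hands to the driver as hfi1_user_info.userversion (HFI1_USER_SWVERSION). A minimal user-space sketch of that packing follows; the (major << shift) | minor composition is an assumption inferred from the shift value above, not quoted from the header:

/* Sketch only: the constants mirror the values visible in the hunk above. */
#include <stdio.h>

#define HFI1_USER_SWMAJOR   6
#define HFI1_USER_SWMINOR   1
#define HFI1_SWMAJOR_SHIFT 16

/* Assumed composition of the combined version word. */
#define HFI1_USER_SWVERSION \
	((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | HFI1_USER_SWMINOR)

int main(void)
{
	unsigned int ver = HFI1_USER_SWVERSION;

	/* Decode: major in the upper 16 bits, minor in the lower 16. */
	printf("version word 0x%08x -> major %u, minor %u\n",
	       ver, ver >> HFI1_SWMAJOR_SHIFT, ver & 0xffff);
	return 0;
}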
@@ -107,19 +112,6 @@
#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
-/*
- * If the unit is specified via open, HFI choice is fixed. If port is
- * specified, it's also fixed. Otherwise we try to spread contexts
- * across ports and HFIs, using different algorithms. WITHIN is
- * the old default, prior to this mechanism.
- */
-#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
- * ports; this is the default */
-#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
- * active ports within), then next HFI */
-#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
-
-
/* User commands. */
#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
@@ -127,7 +119,6 @@
#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
-#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
@@ -135,13 +126,46 @@
#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */
-/* separate EPROM commands from normal PSM commands */
-#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
-#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
-/* range 66-74 no longer used */
-#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */
-#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */
-#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */
+#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */
+
+/*
+ * User IOCTLs cannot go above 128; if they do, see common.h and change the
+ * base for the snoop ioctl.
+ */
+#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */
+
+/*
+ * Make the ioctls occupy the last 0xf0-0xff portion of the IB range
+ */
+#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0)
+
+struct hfi1_cmd;
+#define HFI1_IOCTL_ASSIGN_CTXT \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info)
+#define HFI1_IOCTL_CTXT_INFO \
+ _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info)
+#define HFI1_IOCTL_USER_INFO \
+ _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info)
+#define HFI1_IOCTL_TID_UPDATE \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info)
+#define HFI1_IOCTL_TID_FREE \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info)
+#define HFI1_IOCTL_CREDIT_UPD \
+ _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD))
+#define HFI1_IOCTL_RECV_CTRL \
+ _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int)
+#define HFI1_IOCTL_POLL_TYPE \
+ _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int)
+#define HFI1_IOCTL_ACK_EVENT \
+ _IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long)
+#define HFI1_IOCTL_SET_PKEY \
+ _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16)
+#define HFI1_IOCTL_CTXT_RESET \
+ _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET))
+#define HFI1_IOCTL_TID_INVAL_READ \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info)
+#define HFI1_IOCTL_GET_VERS \
+ _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int)
#define _HFI1_EVENT_FROZEN_BIT 0
#define _HFI1_EVENT_LINKDOWN_BIT 1
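For orientation, the __NUM() offset places the commands above at 0xe1 (ASSIGN_CTXT) through 0xee (GET_VERS) inside IB_IOCTL_MAGIC's number space. The stand-alone sketch below just prints a few of the resulting codes; it redefines the handful of constants it needs rather than including the header:

/* Sketch: where the __NUM() offset lands the per-command ioctl numbers.
 * The command values mirror the HFI1_CMD_* defines above. */
#include <stdio.h>
#include <linux/ioctl.h>	/* _IO() */

#define IB_IOCTL_MAGIC 0x1b
#define __NUM(cmd)     (HFI1_CMD_##cmd + 0xe0)

#define HFI1_CMD_ASSIGN_CTXT  1
#define HFI1_CMD_CREDIT_UPD   6
#define HFI1_CMD_GET_VERS    14

int main(void)
{
	/* 1 + 0xe0 = 0xe1 and 14 + 0xe0 = 0xee, so the current commands
	 * occupy 0xe1-0xee of the magic's number space. */
	printf("ASSIGN_CTXT nr: 0x%02x\n", __NUM(ASSIGN_CTXT));
	printf("GET_VERS    nr: 0x%02x\n", __NUM(GET_VERS));

	/* A parameterless command gets its full request code via _IO(). */
	printf("CREDIT_UPD request: 0x%08lx\n",
	       (unsigned long)_IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD)));
	return 0;
}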
@@ -199,9 +223,7 @@ struct hfi1_user_info {
* Should be set to HFI1_USER_SWVERSION.
*/
__u32 userversion;
- __u16 pad;
- /* HFI selection algorithm, if unit has not selected */
- __u16 hfi1_alg;
+ __u32 pad;
/*
* If two or more processes wish to share a context, each process
* must set the subcontext_cnt and subcontext_id to the same
@@ -243,12 +265,6 @@ struct hfi1_tid_info {
__u32 length;
};
-struct hfi1_cmd {
- __u32 type; /* command type */
- __u32 len; /* length of struct pointed to by add */
- __u64 addr; /* pointer to user structure */
-};
-
enum hfi1_sdma_comp_state {
FREE = 0,
QUEUED,
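A hypothetical consumer of the reworked ABI calls the HFI1_IOCTL_* codes directly instead of funnelling everything through the removed struct hfi1_cmd. In this sketch the device path /dev/hfi1_0, the installed header location <rdma/hfi/hfi1_user.h> and the zero-filled hfi1_user_info are assumptions made for illustration:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/hfi/hfi1_user.h>	/* HFI1_IOCTL_ASSIGN_CTXT, hfi1_user_info */

int main(void)
{
	struct hfi1_user_info uinfo;
	int fd = open("/dev/hfi1_0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&uinfo, 0, sizeof(uinfo));
	uinfo.userversion = HFI1_USER_SWVERSION;	/* announce the user ABI */

	if (ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo) < 0)
		perror("HFI1_IOCTL_ASSIGN_CTXT");

	close(fd);
	return 0;
}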
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 8126c143a519..b6543d73d20a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -226,6 +226,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_odp_caps odp_caps;
__u64 timestamp_mask;
__u64 hca_core_clock; /* in KHZ */
+ __u64 device_cap_flags_ex;
};
struct ib_uverbs_query_port {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e373d151cad..02fe8390c18f 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -135,10 +135,12 @@ enum {
* Local service operations:
* RESOLVE - The client requests the local service to resolve a path.
* SET_TIMEOUT - The local service requests the client to set the timeout.
+ * IP_RESOLVE - The client requests the local service to resolve an IP to GID.
*/
enum {
RDMA_NL_LS_OP_RESOLVE = 0,
RDMA_NL_LS_OP_SET_TIMEOUT,
+ RDMA_NL_LS_OP_IP_RESOLVE,
RDMA_NL_LS_NUM_OPS
};
@@ -176,6 +178,10 @@ struct rdma_ls_resolve_header {
__u8 path_use;
};
+struct rdma_ls_ip_resolve_header {
+ __u32 ifindex;
+};
+
/* Local service attribute type */
#define RDMA_NLA_F_MANDATORY (1 << 13)
#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
@@ -193,6 +199,8 @@ struct rdma_ls_resolve_header {
* TCLASS u8
* PKEY u16 cpu
* QOS_CLASS u16 cpu
+ * IPV4 u32 BE
+ * IPV6 u8[16] BE
*/
enum {
LS_NLA_TYPE_UNSPEC = 0,
@@ -204,6 +212,8 @@ enum {
LS_NLA_TYPE_TCLASS,
LS_NLA_TYPE_PKEY,
LS_NLA_TYPE_QOS_CLASS,
+ LS_NLA_TYPE_IPV4,
+ LS_NLA_TYPE_IPV6,
LS_NLA_TYPE_MAX
};
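To make the IP_RESOLVE additions concrete, the sketch below lays out a request payload as the fixed rdma_ls_ip_resolve_header followed by one mandatory IPV4 attribute. The installed header path, the single-attribute layout and the example ifindex/address are assumptions; only the types and the mandatory flag come from the header above:

/* Rough payload layout for an IP_RESOLVE request: fixed header, then one
 * big-endian IPV4 attribute flagged mandatory.  Attribute packing follows
 * ordinary netlink rules. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl(): the IPV4 payload is big-endian */
#include <linux/netlink.h>	/* struct nlattr, NLA_HDRLEN, NLA_ALIGN */
#include <rdma/rdma_netlink.h>	/* rdma_ls_ip_resolve_header, LS_NLA_TYPE_IPV4 */

int main(void)
{
	unsigned char buf[64] __attribute__((aligned(4)));
	struct rdma_ls_ip_resolve_header *hdr = (void *)buf;
	struct nlattr *nla = (void *)(buf + sizeof(*hdr));
	unsigned int ipv4 = htonl(0xc0a80001);	/* 192.168.0.1, network order */

	memset(buf, 0, sizeof(buf));
	hdr->ifindex = 2;			/* assumed interface index */

	nla->nla_type = LS_NLA_TYPE_IPV4 | RDMA_NLA_F_MANDATORY;
	nla->nla_len  = NLA_HDRLEN + sizeof(ipv4);
	memcpy((unsigned char *)nla + NLA_HDRLEN, &ipv4, sizeof(ipv4));

	printf("payload length: %zu bytes\n",
	       sizeof(*hdr) + NLA_ALIGN(nla->nla_len));
	return 0;
}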
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index c4cc1e40b35c..e4701a3c6331 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -116,6 +116,14 @@
#define SND_SOC_TPLG_STREAM_PLAYBACK 0
#define SND_SOC_TPLG_STREAM_CAPTURE 1
+/* vendor tuple types */
+#define SND_SOC_TPLG_TUPLE_TYPE_UUID 0
+#define SND_SOC_TPLG_TUPLE_TYPE_STRING 1
+#define SND_SOC_TPLG_TUPLE_TYPE_BOOL 2
+#define SND_SOC_TPLG_TUPLE_TYPE_BYTE 3
+#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
+#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -132,6 +140,35 @@ struct snd_soc_tplg_hdr {
__le32 count; /* number of elements in block */
} __attribute__((packed));
+/* vendor tuple for uuid */
+struct snd_soc_tplg_vendor_uuid_elem {
+ __le32 token;
+ char uuid[16];
+} __attribute__((packed));
+
+/* vendor tuple for a bool/byte/short/word value */
+struct snd_soc_tplg_vendor_value_elem {
+ __le32 token;
+ __le32 value;
+} __attribute__((packed));
+
+/* vendor tuple for string */
+struct snd_soc_tplg_vendor_string_elem {
+ __le32 token;
+ char string[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+} __attribute__((packed));
+
+struct snd_soc_tplg_vendor_array {
+ __le32 size; /* size in bytes of the array, including all elements */
+ __le32 type; /* SND_SOC_TPLG_TUPLE_TYPE_ */
+ __le32 num_elems; /* number of elements in array */
+ union {
+ struct snd_soc_tplg_vendor_uuid_elem uuid[0];
+ struct snd_soc_tplg_vendor_value_elem value[0];
+ struct snd_soc_tplg_vendor_string_elem string[0];
+ };
+} __attribute__((packed));
+
/*
* Private data.
* All topology objects may have private data that can be used by the driver or
@@ -139,7 +176,10 @@ struct snd_soc_tplg_hdr {
*/
struct snd_soc_tplg_private {
__le32 size; /* in bytes of private data */
- char data[0];
+ union {
+ char data[0];
+ struct snd_soc_tplg_vendor_array array[0];
+ };
} __attribute__((packed));
/*
@@ -383,7 +423,7 @@ struct snd_soc_tplg_pcm {
__le32 size; /* in bytes of this structure */
char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- __le32 pcm_id; /* unique ID - used to match */
+ __le32 pcm_id; /* unique ID - used to match with DAI link */
__le32 dai_id; /* unique ID - used to match */
__le32 playback; /* supports playback mode */
__le32 capture; /* supports capture mode */
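A sketch of how a reader might walk the new vendor tuples out of a private-data block follows. It assumes a little-endian host (the __le32 fields are used as plain integers), that <sound/asoc.h> is the installed header name, and that the block has already been size-validated (a real parser would bounds-check every array):

/* Walk a topology private-data block as an array of vendor tuple arrays
 * and print the word-type tuples.  Little-endian host assumed. */
#include <stdio.h>
#include <sound/asound.h>	/* SNDRV_CTL_ELEM_ID_NAME_MAXLEN */
#include <sound/asoc.h>		/* snd_soc_tplg_private, vendor tuple types */

static void dump_word_tuples(const struct snd_soc_tplg_private *priv)
{
	const unsigned char *p = (const unsigned char *)priv->array;
	const unsigned char *end = p + priv->size;

	while (p < end) {
		const struct snd_soc_tplg_vendor_array *arr = (const void *)p;
		unsigned int i;

		if (arr->type == SND_SOC_TPLG_TUPLE_TYPE_WORD)
			for (i = 0; i < arr->num_elems; i++)
				printf("token %u = 0x%x\n",
				       arr->value[i].token, arr->value[i].value);

		p += arr->size;	/* size covers the header plus all elements */
	}
}

int main(void)
{
	/* Hand-built block: one WORD array carrying two token/value pairs. */
	unsigned char buf[sizeof(struct snd_soc_tplg_private) +
			  sizeof(struct snd_soc_tplg_vendor_array) +
			  2 * sizeof(struct snd_soc_tplg_vendor_value_elem)];
	struct snd_soc_tplg_private *priv = (void *)buf;
	struct snd_soc_tplg_vendor_array *arr = (void *)priv->array;

	priv->size = sizeof(buf) - sizeof(*priv);
	arr->size = priv->size;
	arr->type = SND_SOC_TPLG_TUPLE_TYPE_WORD;
	arr->num_elems = 2;
	arr->value[0].token = 1;
	arr->value[0].value = 48000;
	arr->value[1].token = 2;
	arr->value[1].value = 2;

	dump_word_tuples(priv);
	return 0;
}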
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 67bf49d8c944..609cadb8739d 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -672,7 +672,7 @@ enum {
/* global timers (device member) */
#define SNDRV_TIMER_GLOBAL_SYSTEM 0
-#define SNDRV_TIMER_GLOBAL_RTC 1
+#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */
#define SNDRV_TIMER_GLOBAL_HPET 2
#define SNDRV_TIMER_GLOBAL_HRTIMER 3
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index c1c1ca18abc0..0098a522d9f4 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -179,9 +179,9 @@
#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
-#define TRIGCON_HWTRIGMASK_I80_RGB (1 << 4)
-#define TRIGCON_HWTRIGEN_I80_RGB (1 << 3)
-#define TRIGCON_HWTRIG_INV_I80_RGB (1 << 2)
+#define TRIGCON_HWTRIGMASK (1 << 4)
+#define TRIGCON_HWTRIGEN (1 << 3)
+#define TRIGCON_HWTRIG_INV (1 << 2)
#define TRIGCON_SWTRIGCMD (1 << 1)
#define TRIGCON_SWTRIGEN (1 << 0)
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ad66589f2ae6..3a2a79401789 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
#include <linux/videodev2.h>
#include <linux/bitmap.h>
#include <linux/fb.h>
+#include <linux/of.h>
#include <media/v4l2-mediabus.h>
#include <video/videomode.h>
@@ -345,6 +346,7 @@ struct ipu_client_platformdata {
int dc;
int dp;
int dma[2];
+ struct device_node *of_node;
};
#endif /* __DRM_IPU_H__ */
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index ddcc8ca7316b..19aa65a35546 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -115,6 +115,14 @@ enum {
MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E,
MIPI_DCS_SET_TEAR_SCANLINE = 0x44,
MIPI_DCS_GET_SCANLINE = 0x45,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51, /* MIPI DCS 1.3 */
+ MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 0x52, /* MIPI DCS 1.3 */
+ MIPI_DCS_WRITE_CONTROL_DISPLAY = 0x53, /* MIPI DCS 1.3 */
+ MIPI_DCS_GET_CONTROL_DISPLAY = 0x54, /* MIPI DCS 1.3 */
+ MIPI_DCS_WRITE_POWER_SAVE = 0x55, /* MIPI DCS 1.3 */
+ MIPI_DCS_GET_POWER_SAVE = 0x56, /* MIPI DCS 1.3 */
+ MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E, /* MIPI DCS 1.3 */
+ MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F, /* MIPI DCS 1.3 */
MIPI_DCS_READ_DDB_START = 0xA1,
MIPI_DCS_READ_DDB_CONTINUE = 0xA8,
};
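As a hedged illustration only, a panel driver could drive the new DCS 1.3 brightness opcode through the generic DSI helpers roughly as below; the single-byte payload is an assumption (some panels take a 16-bit value), and mipi_dsi_dcs_write() is the existing raw DCS write helper:

/* Minimal sketch of using the new opcode from a hypothetical panel driver. */
#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int panel_set_brightness(struct mipi_dsi_device *dsi, u8 level)
{
	/* Assumed single-byte brightness payload; check the panel datasheet. */
	ssize_t err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					 &level, sizeof(level));

	return err < 0 ? err : 0;
}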
diff --git a/include/video/sh_mipi_dsi.h b/include/video/sh_mipi_dsi.h
deleted file mode 100644
index a01f197e6ac1..000000000000
--- a/include/video/sh_mipi_dsi.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Public SH-mobile MIPI DSI header
- *
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef VIDEO_SH_MIPI_DSI_H
-#define VIDEO_SH_MIPI_DSI_H
-
-enum sh_mipi_dsi_data_fmt {
- MIPI_RGB888,
- MIPI_RGB565,
- MIPI_RGB666_LP,
- MIPI_RGB666,
- MIPI_BGR888,
- MIPI_BGR565,
- MIPI_BGR666_LP,
- MIPI_BGR666,
- MIPI_YUYV,
- MIPI_UYVY,
- MIPI_YUV420_L,
- MIPI_YUV420,
-};
-
-#define SH_MIPI_DSI_HSABM (1 << 0)
-#define SH_MIPI_DSI_HBPBM (1 << 1)
-#define SH_MIPI_DSI_HFPBM (1 << 2)
-#define SH_MIPI_DSI_BL2E (1 << 3)
-#define SH_MIPI_DSI_VSEE (1 << 4)
-#define SH_MIPI_DSI_HSEE (1 << 5)
-#define SH_MIPI_DSI_HSAE (1 << 6)
-
-#define SH_MIPI_DSI_HSbyteCLK (1 << 24)
-#define SH_MIPI_DSI_HS6divCLK (1 << 25)
-#define SH_MIPI_DSI_HS4divCLK (1 << 26)
-
-#define SH_MIPI_DSI_SYNC_PULSES_MODE (SH_MIPI_DSI_VSEE | \
- SH_MIPI_DSI_HSEE | \
- SH_MIPI_DSI_HSAE)
-#define SH_MIPI_DSI_SYNC_EVENTS_MODE (0)
-#define SH_MIPI_DSI_SYNC_BURST_MODE (SH_MIPI_DSI_BL2E)
-
-struct sh_mipi_dsi_info {
- enum sh_mipi_dsi_data_fmt data_format;
- int channel;
- int lane;
- unsigned long flags;
- u32 clksrc;
- u32 phyctrl; /* for extra setting */
- unsigned int vsynw_offset;
- int (*set_dot_clock)(struct platform_device *pdev,
- void __iomem *base,
- int enable);
-};
-
-#endif
diff --git a/include/xen/page.h b/include/xen/page.h
index 96294ac93755..9dc46cb8a0fd 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -15,9 +15,9 @@
*/
#define xen_pfn_to_page(xen_pfn) \
- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
#define page_to_xen_pfn(page) \
- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
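The rewritten macros fold the two shifts into one, which is equivalent whenever PAGE_SHIFT >= XEN_PAGE_SHIFT and, presumably the point of the change, never widens the frame number first, so nothing is lost when unsigned long is only 32 bits wide. A small user-space arithmetic sketch (PAGE_SHIFT = 16 is assumed purely for the example):

/* Arithmetic sketch, not kernel code: with 64 KiB kernel pages and 4 KiB
 * Xen granules each kernel page holds 16 Xen pages, so the conversion is a
 * plain shift by PAGE_SHIFT - XEN_PAGE_SHIFT = 4.  The old form shifted
 * left first, which can truncate on a 32-bit unsigned long for high frame
 * numbers. */
#include <stdio.h>

#define PAGE_SHIFT     16	/* assumed 64 KiB kernel pages */
#define XEN_PAGE_SHIFT 12	/* Xen granules are 4 KiB */

int main(void)
{
	unsigned long xen_pfn = 0x123456;

	unsigned long old_form = (xen_pfn << XEN_PAGE_SHIFT) >> PAGE_SHIFT;
	unsigned long new_form = xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT);

	/* Both print the same kernel pfn on a 64-bit host. */
	printf("kernel pfn: old=%#lx new=%#lx\n", old_form, new_form);

	/* page_to_xen_pfn maps back to the first Xen frame of that page. */
	printf("first xen pfn of that page: %#lx\n",
	       new_form << (PAGE_SHIFT - XEN_PAGE_SHIFT));
	return 0;
}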